Fix an uninitialized variable
[ira/wip.git] source3/rpc_client/cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
54 static const struct pipe_id_info {
55         /* the names appear not to matter: the syntaxes _do_ matter */
56
57         const char *client_pipe;
58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
60 {
61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
68         { PIPE_SPOOLSS,         &syntax_spoolss },
69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
77         { NULL, NULL }
78 };
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85                                          struct cli_state *cli,
86                                          const struct ndr_syntax_id *interface)
87 {
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
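                        /* Strip the "\PIPE" prefix; the name handed back
                           keeps its leading backslash, e.g. "\srvsvc". */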
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         return NULL;
102 }
103
104 /********************************************************************
105  Map internal value to wire value.
106  ********************************************************************/
107
108 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
109 {
110         switch (auth_type) {
111
112         case PIPE_AUTH_TYPE_NONE:
113                 return RPC_ANONYMOUS_AUTH_TYPE;
114
115         case PIPE_AUTH_TYPE_NTLMSSP:
116                 return RPC_NTLMSSP_AUTH_TYPE;
117
118         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
119         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
120                 return RPC_SPNEGO_AUTH_TYPE;
121
122         case PIPE_AUTH_TYPE_SCHANNEL:
123                 return RPC_SCHANNEL_AUTH_TYPE;
124
125         case PIPE_AUTH_TYPE_KRB5:
126                 return RPC_KRB5_AUTH_TYPE;
127
128         default:
129                 DEBUG(0,("map_pipe_auth_type_to_rpc_auth_type: unknown pipe "
130                         "auth type %u\n",
131                         (unsigned int)auth_type ));
132                 break;
133         }
134         return -1;
135 }
136
137 /********************************************************************
138  Pipe description for a DEBUG
139  ********************************************************************/
140 static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
141 {
142         char *result;
143
144         switch (cli->transport_type) {
145         case NCACN_NP:
146                 result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
147                                          "fnum 0x%x",
148                                          cli->desthost,
149                                          cli->trans.np.pipe_name,
150                                          (unsigned int)(cli->trans.np.fnum));
151                 break;
152         case NCACN_IP_TCP:
153         case NCACN_UNIX_STREAM:
154                 result = talloc_asprintf(mem_ctx, "host %s, fd %d",
155                                          cli->desthost, cli->trans.sock.fd);
156                 break;
157         default:
158                 result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
159                 break;
160         }
161         SMB_ASSERT(result != NULL);
162         return result;
163 }
164
165 /********************************************************************
166  Rpc pipe call id.
167  ********************************************************************/
168
169 static uint32 get_rpc_call_id(void)
170 {
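        /* A simple process-wide counter is enough here: a call id only has
           to be unique among the calls outstanding on one connection. */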
171         static uint32 call_id = 0;
172         return ++call_id;
173 }
174
175 /*
176  * Realloc pdu to have at least "size" bytes
177  */
178
179 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
180 {
181         size_t extra_size;
182
183         if (prs_data_size(pdu) >= size) {
184                 return true;
185         }
186
187         extra_size = size - prs_data_size(pdu);
188
189         if (!prs_force_grow(pdu, extra_size)) {
190                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
191                           "%d bytes.\n", (int)extra_size));
192                 return false;
193         }
194
195         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
196                   (int)extra_size, prs_data_size(pdu)));
197         return true;
198 }
199
200
201 /*******************************************************************
202  Use SMBreadX to get the rest of one fragment's worth of rpc data.
203  Reads the whole size or gives an error message.
204  ********************************************************************/
205
206 struct rpc_read_state {
207         struct event_context *ev;
208         struct rpc_pipe_client *cli;
209         char *data;
210         size_t size;
211         size_t num_read;
212 };
213
214 static void rpc_read_np_done(struct async_req *subreq);
215 static void rpc_read_sock_done(struct async_req *subreq);
216
217 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
218                                        struct event_context *ev,
219                                        struct rpc_pipe_client *cli,
220                                        char *data, size_t size)
221 {
222         struct async_req *result, *subreq;
223         struct rpc_read_state *state;
224
225         result = async_req_new(mem_ctx);
226         if (result == NULL) {
227                 return NULL;
228         }
229         state = talloc(result, struct rpc_read_state);
230         if (state == NULL) {
231                 goto fail;
232         }
233         result->private_data = state;
234
235         state->ev = ev;
236         state->cli = cli;
237         state->data = data;
238         state->size = size;
239         state->num_read = 0;
240
241         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
242
243         if (cli->transport_type == NCACN_NP) {
244                 subreq = cli_read_andx_send(
245                         state, ev, cli->trans.np.cli,
246                         cli->trans.np.fnum, 0, size);
247                 if (subreq == NULL) {
248                         DEBUG(10, ("cli_read_andx_send failed\n"));
249                         goto fail;
250                 }
251                 subreq->async.fn = rpc_read_np_done;
252                 subreq->async.priv = result;
253                 return result;
254         }
255
256         if ((cli->transport_type == NCACN_IP_TCP)
257             || (cli->transport_type == NCACN_UNIX_STREAM)) {
258                 subreq = recvall_send(state, ev, cli->trans.sock.fd,
259                                       data, size, 0);
260                 if (subreq == NULL) {
261                         DEBUG(10, ("recvall_send failed\n"));
262                         goto fail;
263                 }
264                 subreq->async.fn = rpc_read_sock_done;
265                 subreq->async.priv = result;
266                 return result;
267         }
268
269         if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
270                 return result;
271         }
272  fail:
273         TALLOC_FREE(result);
274         return NULL;
275 }
276
277 static void rpc_read_np_done(struct async_req *subreq)
278 {
279         struct async_req *req = talloc_get_type_abort(
280                 subreq->async.priv, struct async_req);
281         struct rpc_read_state *state = talloc_get_type_abort(
282                 req->private_data, struct rpc_read_state);
283         NTSTATUS status;
284         ssize_t received;
285         uint8_t *rcvbuf;
286
287         status = cli_read_andx_recv(subreq, &received, &rcvbuf);
288         /*
289          * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
290          * child of that.
291          */
292         if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
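        /*
         * BUFFER_TOO_SMALL on a pipe read only means the server holds more
         * data for this fragment than fitted into a single read; the bytes
         * we did receive are valid, and the remainder is fetched below.
         */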
293                 status = NT_STATUS_OK;
294         }
295         if (!NT_STATUS_IS_OK(status)) {
296                 TALLOC_FREE(subreq);
297                 async_req_error(req, status);
298                 return;
299         }
300
301         memcpy(state->data + state->num_read, rcvbuf, received);
302         TALLOC_FREE(subreq);
303
304         state->num_read += received;
305
306         if (state->num_read == state->size) {
307                 async_req_done(req);
308                 return;
309         }
310
311         subreq = cli_read_andx_send(
312                 state, state->ev, state->cli->trans.np.cli,
313                 state->cli->trans.np.fnum, 0,
314                 state->size - state->num_read);
315
316         if (async_req_nomem(subreq, req)) {
317                 return;
318         }
319
320         subreq->async.fn = rpc_read_np_done;
321         subreq->async.priv = req;
322 }
323
324 static void rpc_read_sock_done(struct async_req *subreq)
325 {
326         struct async_req *req = talloc_get_type_abort(
327                 subreq->async.priv, struct async_req);
328         NTSTATUS status;
329
330         status = recvall_recv(subreq);
331         TALLOC_FREE(subreq);
332         if (!NT_STATUS_IS_OK(status)) {
333                 async_req_error(req, status);
334                 return;
335         }
336
337         async_req_done(req);
338 }
339
340 static NTSTATUS rpc_read_recv(struct async_req *req)
341 {
342         return async_req_simple_recv(req);
343 }
344
345 struct rpc_write_state {
346         struct event_context *ev;
347         struct rpc_pipe_client *cli;
348         const char *data;
349         size_t size;
350         size_t num_written;
351 };
352
353 static void rpc_write_np_done(struct async_req *subreq);
354 static void rpc_write_sock_done(struct async_req *subreq);
355
356 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
357                                         struct event_context *ev,
358                                         struct rpc_pipe_client *cli,
359                                         const char *data, size_t size)
360 {
361         struct async_req *result, *subreq;
362         struct rpc_write_state *state;
363
364         result = async_req_new(mem_ctx);
365         if (result == NULL) {
366                 return NULL;
367         }
368         state = talloc(result, struct rpc_write_state);
369         if (state == NULL) {
370                 goto fail;
371         }
372         result->private_data = state;
373
374         state->ev = ev;
375         state->cli = cli;
376         state->data = data;
377         state->size = size;
378         state->num_written = 0;
379
380         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
381
382         if (cli->transport_type == NCACN_NP) {
383                 subreq = cli_write_andx_send(
384                         state, ev, cli->trans.np.cli,
385                         cli->trans.np.fnum, 8, /* 8 means message mode. */
386                         (uint8_t *)data, 0, size);
387                 if (subreq == NULL) {
388                         DEBUG(10, ("cli_write_andx_send failed\n"));
389                         goto fail;
390                 }
391                 subreq->async.fn = rpc_write_np_done;
392                 subreq->async.priv = result;
393                 return result;
394         }
395
396         if ((cli->transport_type == NCACN_IP_TCP)
397             || (cli->transport_type == NCACN_UNIX_STREAM)) {
398                 subreq = sendall_send(state, ev, cli->trans.sock.fd,
399                                       data, size, 0);
400                 if (subreq == NULL) {
401                         DEBUG(10, ("sendall_send failed\n"));
402                         goto fail;
403                 }
404                 subreq->async.fn = rpc_write_sock_done;
405                 subreq->async.priv = result;
406                 return result;
407         }
408
409         if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
410                 return result;
411         }
412  fail:
413         TALLOC_FREE(result);
414         return NULL;
415 }
416
417 static void rpc_write_np_done(struct async_req *subreq)
418 {
419         struct async_req *req = talloc_get_type_abort(
420                 subreq->async.priv, struct async_req);
421         struct rpc_write_state *state = talloc_get_type_abort(
422                 req->private_data, struct rpc_write_state);
423         NTSTATUS status;
424         size_t written;
425
426         status = cli_write_andx_recv(subreq, &written);
427         TALLOC_FREE(subreq);
428         if (!NT_STATUS_IS_OK(status)) {
429                 async_req_error(req, status);
430                 return;
431         }
432
433         state->num_written += written;
434
435         if (state->num_written == state->size) {
436                 async_req_done(req);
437                 return;
438         }
439
440         subreq = cli_write_andx_send(
441                 state, state->ev, state->cli->trans.np.cli,
442                 state->cli->trans.np.fnum, 8,
443                 (uint8_t *)(state->data + state->num_written),
444                 0, state->size - state->num_written);
445
446         if (async_req_nomem(subreq, req)) {
447                 return;
448         }
449
450         subreq->async.fn = rpc_write_np_done;
451         subreq->async.priv = req;
452 }
453
454 static void rpc_write_sock_done(struct async_req *subreq)
455 {
456         struct async_req *req = talloc_get_type_abort(
457                 subreq->async.priv, struct async_req);
458         NTSTATUS status;
459
460         status = sendall_recv(subreq);
461         TALLOC_FREE(subreq);
462         if (!NT_STATUS_IS_OK(status)) {
463                 async_req_error(req, status);
464                 return;
465         }
466
467         async_req_done(req);
468 }
469
470 static NTSTATUS rpc_write_recv(struct async_req *req)
471 {
472         return async_req_simple_recv(req);
473 }
474
475
476 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
477                                  struct rpc_hdr_info *prhdr,
478                                  prs_struct *pdu)
479 {
480         /*
481          * This next call sets the endian bit correctly in pdu. We
482          * will propagate this to the returned data later.
483          */
484
485         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
486                 DEBUG(0, ("parse_rpc_header: Failed to unmarshall RPC_HDR.\n"));
487                 return NT_STATUS_BUFFER_TOO_SMALL;
488         }
489
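        /* Sanity check: never accept a fragment larger than the receive size
           we negotiated (or defaulted to) for this pipe. */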
490         if (prhdr->frag_len > cli->max_recv_frag) {
491                 DEBUG(0, ("parse_rpc_header: Server sent fraglen %d,"
492                           " we only allow %d\n", (int)prhdr->frag_len,
493                           (int)cli->max_recv_frag));
494                 return NT_STATUS_BUFFER_TOO_SMALL;
495         }
496
497         return NT_STATUS_OK;
498 }
499
500 /****************************************************************************
501  Try and get a PDU's worth of data from current_pdu. If not, then read more
502  from the wire.
503  ****************************************************************************/
504
505 struct get_complete_frag_state {
506         struct event_context *ev;
507         struct rpc_pipe_client *cli;
508         struct rpc_hdr_info *prhdr;
509         prs_struct *pdu;
510 };
511
512 static void get_complete_frag_got_header(struct async_req *subreq);
513 static void get_complete_frag_got_rest(struct async_req *subreq);
514
515 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
516                                                struct event_context *ev,
517                                                struct rpc_pipe_client *cli,
518                                                struct rpc_hdr_info *prhdr,
519                                                prs_struct *pdu)
520 {
521         struct async_req *result, *subreq;
522         struct get_complete_frag_state *state;
523         uint32_t pdu_len;
524         NTSTATUS status;
525
526         result = async_req_new(mem_ctx);
527         if (result == NULL) {
528                 return NULL;
529         }
530         state = talloc(result, struct get_complete_frag_state);
531         if (state == NULL) {
532                 goto fail;
533         }
534         result->private_data = state;
535
536         state->ev = ev;
537         state->cli = cli;
538         state->prhdr = prhdr;
539         state->pdu = pdu;
540
541         pdu_len = prs_data_size(pdu);
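        /*
         * First make sure we have a complete RPC header, so that
         * parse_rpc_header can tell us how long the whole fragment is.
         */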
542         if (pdu_len < RPC_HEADER_LEN) {
543                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
544                         status = NT_STATUS_NO_MEMORY;
545                         goto post_status;
546                 }
547                 subreq = rpc_read_send(state, state->ev, state->cli,
548                                        prs_data_p(state->pdu) + pdu_len,
549                                        RPC_HEADER_LEN - pdu_len);
550                 if (subreq == NULL) {
551                         status = NT_STATUS_NO_MEMORY;
552                         goto post_status;
553                 }
554                 subreq->async.fn = get_complete_frag_got_header;
555                 subreq->async.priv = result;
556                 return result;
557         }
558
559         status = parse_rpc_header(cli, prhdr, pdu);
560         if (!NT_STATUS_IS_OK(status)) {
561                 goto post_status;
562         }
563
564         /*
565          * Ensure we have frag_len bytes of data.
566          */
567         if (pdu_len < prhdr->frag_len) {
568                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
569                         status = NT_STATUS_NO_MEMORY;
570                         goto post_status;
571                 }
572                 subreq = rpc_read_send(state, state->ev, state->cli,
573                                        prs_data_p(pdu) + pdu_len,
574                                        prhdr->frag_len - pdu_len);
575                 if (subreq == NULL) {
576                         status = NT_STATUS_NO_MEMORY;
577                         goto post_status;
578                 }
579                 subreq->async.fn = get_complete_frag_got_rest;
580                 subreq->async.priv = result;
581                 return result;
582         }
583
584         status = NT_STATUS_OK;
585  post_status:
586         if (async_post_status(result, ev, status)) {
587                 return result;
588         }
589  fail:
590         TALLOC_FREE(result);
591         return NULL;
592 }
593
594 static void get_complete_frag_got_header(struct async_req *subreq)
595 {
596         struct async_req *req = talloc_get_type_abort(
597                 subreq->async.priv, struct async_req);
598         struct get_complete_frag_state *state = talloc_get_type_abort(
599                 req->private_data, struct get_complete_frag_state);
600         NTSTATUS status;
601
602         status = rpc_read_recv(subreq);
603         TALLOC_FREE(subreq);
604         if (!NT_STATUS_IS_OK(status)) {
605                 async_req_error(req, status);
606                 return;
607         }
608
609         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
610         if (!NT_STATUS_IS_OK(status)) {
611                 async_req_error(req, status);
612                 return;
613         }
614
615         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
616                 async_req_error(req, NT_STATUS_NO_MEMORY);
617                 return;
618         }
619
620         /*
621          * We're here in this piece of code because we've read exactly
622          * RPC_HEADER_LEN bytes into state->pdu.
623          */
624
625         subreq = rpc_read_send(state, state->ev, state->cli,
626                                prs_data_p(state->pdu) + RPC_HEADER_LEN,
627                                state->prhdr->frag_len - RPC_HEADER_LEN);
628         if (async_req_nomem(subreq, req)) {
629                 return;
630         }
631         subreq->async.fn = get_complete_frag_got_rest;
632         subreq->async.priv = req;
633 }
634
635 static void get_complete_frag_got_rest(struct async_req *subreq)
636 {
637         struct async_req *req = talloc_get_type_abort(
638                 subreq->async.priv, struct async_req);
639         NTSTATUS status;
640
641         status = rpc_read_recv(subreq);
642         TALLOC_FREE(subreq);
643         if (!NT_STATUS_IS_OK(status)) {
644                 async_req_error(req, status);
645                 return;
646         }
647         async_req_done(req);
648 }
649
650 static NTSTATUS get_complete_frag_recv(struct async_req *req)
651 {
652         return async_req_simple_recv(req);
653 }
654
655 /****************************************************************************
656  NTLMSSP specific sign/seal.
657  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
658  In fact I should probably abstract these into identical pieces of code... JRA.
659  ****************************************************************************/
660
661 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
662                                 prs_struct *current_pdu,
663                                 uint8 *p_ss_padding_len)
664 {
665         RPC_HDR_AUTH auth_info;
666         uint32 save_offset = prs_offset(current_pdu);
667         uint32 auth_len = prhdr->auth_len;
668         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
669         unsigned char *data = NULL;
670         size_t data_len;
671         unsigned char *full_packet_data = NULL;
672         size_t full_packet_data_len;
673         DATA_BLOB auth_blob;
674         NTSTATUS status;
675
676         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
677             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
678                 return NT_STATUS_OK;
679         }
680
681         if (!ntlmssp_state) {
682                 return NT_STATUS_INVALID_PARAMETER;
683         }
684
685         /* Ensure there's enough data for an authenticated response. */
686         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
687                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
688                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
689                         (unsigned int)auth_len ));
690                 return NT_STATUS_BUFFER_TOO_SMALL;
691         }
692
693         /*
694          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
695          * after the RPC header.
696          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
697          * functions as NTLMv2 checks the rpc headers also.
698          */
699
700         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
701         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
702
703         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
704         full_packet_data_len = prhdr->frag_len - auth_len;
705
706         /* Pull the auth header and the following data into a blob. */
707         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
708                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
709                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
710                 return NT_STATUS_BUFFER_TOO_SMALL;
711         }
712
713         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
714                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
715                 return NT_STATUS_BUFFER_TOO_SMALL;
716         }
717
718         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
719         auth_blob.length = auth_len;
720
721         switch (cli->auth->auth_level) {
722                 case PIPE_AUTH_LEVEL_PRIVACY:
723                         /* Data is encrypted. */
724                         status = ntlmssp_unseal_packet(ntlmssp_state,
725                                                         data, data_len,
726                                                         full_packet_data,
727                                                         full_packet_data_len,
728                                                         &auth_blob);
729                         if (!NT_STATUS_IS_OK(status)) {
730                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
731                                         "packet from %s. Error was %s.\n",
732                                         rpccli_pipe_txt(debug_ctx(), cli),
733                                         nt_errstr(status) ));
734                                 return status;
735                         }
736                         break;
737                 case PIPE_AUTH_LEVEL_INTEGRITY:
738                         /* Data is signed. */
739                         status = ntlmssp_check_packet(ntlmssp_state,
740                                                         data, data_len,
741                                                         full_packet_data,
742                                                         full_packet_data_len,
743                                                         &auth_blob);
744                         if (!NT_STATUS_IS_OK(status)) {
745                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
746                                         "packet from %s. Error was %s.\n",
747                                         rpccli_pipe_txt(debug_ctx(), cli),
748                                         nt_errstr(status) ));
749                                 return status;
750                         }
751                         break;
752                 default:
753                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
754                                   "auth level %d\n", cli->auth->auth_level));
755                         return NT_STATUS_INVALID_INFO_CLASS;
756         }
757
758         /*
759          * Return the current pointer to the data offset.
760          */
761
762         if(!prs_set_offset(current_pdu, save_offset)) {
763                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
764                         (unsigned int)save_offset ));
765                 return NT_STATUS_BUFFER_TOO_SMALL;
766         }
767
768         /*
769          * Remember the padding length. We must remove it from the real data
770          * stream once the sign/seal is done.
771          */
772
773         *p_ss_padding_len = auth_info.auth_pad_len;
774
775         return NT_STATUS_OK;
776 }
777
778 /****************************************************************************
779  schannel specific sign/seal.
780  ****************************************************************************/
781
782 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
783                                 prs_struct *current_pdu,
784                                 uint8 *p_ss_padding_len)
785 {
786         RPC_HDR_AUTH auth_info;
787         RPC_AUTH_SCHANNEL_CHK schannel_chk;
788         uint32 auth_len = prhdr->auth_len;
789         uint32 save_offset = prs_offset(current_pdu);
790         struct schannel_auth_struct *schannel_auth =
791                 cli->auth->a_u.schannel_auth;
792         uint32 data_len;
793
794         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
795             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
796                 return NT_STATUS_OK;
797         }
798
799         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
800                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
801                 return NT_STATUS_INVALID_PARAMETER;
802         }
803
804         if (!schannel_auth) {
805                 return NT_STATUS_INVALID_PARAMETER;
806         }
807
808         /* Ensure there's enough data for an authenticated response. */
809         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
810                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
811                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
812                         (unsigned int)auth_len ));
813                 return NT_STATUS_INVALID_PARAMETER;
814         }
815
816         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
817
818         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
819                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
820                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
821                 return NT_STATUS_BUFFER_TOO_SMALL;
822         }
823
824         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
825                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
826                 return NT_STATUS_BUFFER_TOO_SMALL;
827         }
828
829         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
830                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
831                         auth_info.auth_type));
832                 return NT_STATUS_BUFFER_TOO_SMALL;
833         }
834
835         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
836                                 &schannel_chk, current_pdu, 0)) {
837                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
838                 return NT_STATUS_BUFFER_TOO_SMALL;
839         }
840
841         if (!schannel_decode(schannel_auth,
842                         cli->auth->auth_level,
843                         SENDER_IS_ACCEPTOR,
844                         &schannel_chk,
845                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
846                         data_len)) {
847                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
848                                 "Connection to %s.\n",
849                                 rpccli_pipe_txt(debug_ctx(), cli)));
850                 return NT_STATUS_INVALID_PARAMETER;
851         }
852
853         /* The sequence number gets incremented on both send and receive. */
854         schannel_auth->seq_num++;
855
856         /*
857          * Return the current pointer to the data offset.
858          */
859
860         if(!prs_set_offset(current_pdu, save_offset)) {
861                 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
862                         (unsigned int)save_offset ));
863                 return NT_STATUS_BUFFER_TOO_SMALL;
864         }
865
866         /*
867          * Remember the padding length. We must remove it from the real data
868          * stream once the sign/seal is done.
869          */
870
871         *p_ss_padding_len = auth_info.auth_pad_len;
872
873         return NT_STATUS_OK;
874 }
875
876 /****************************************************************************
877  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
878  ****************************************************************************/
879
880 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
881                                 prs_struct *current_pdu,
882                                 uint8 *p_ss_padding_len)
883 {
884         NTSTATUS ret = NT_STATUS_OK;
885
886         /* Paranoia checks for auth_len. */
887         if (prhdr->auth_len) {
888                 if (prhdr->auth_len > prhdr->frag_len) {
889                         return NT_STATUS_INVALID_PARAMETER;
890                 }
891
892                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
893                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
894                         /* Integer wrap attempt. */
895                         return NT_STATUS_INVALID_PARAMETER;
896                 }
897         }
898
899         /*
900          * Now we have a complete RPC reply PDU fragment, try to verify any auth data.
901          */
902
903         switch(cli->auth->auth_type) {
904                 case PIPE_AUTH_TYPE_NONE:
905                         if (prhdr->auth_len) {
906                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
907                                           "Connection to %s - got non-zero "
908                                           "auth len %u.\n",
909                                         rpccli_pipe_txt(debug_ctx(), cli),
910                                         (unsigned int)prhdr->auth_len ));
911                                 return NT_STATUS_INVALID_PARAMETER;
912                         }
913                         break;
914
915                 case PIPE_AUTH_TYPE_NTLMSSP:
916                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
917                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
918                         if (!NT_STATUS_IS_OK(ret)) {
919                                 return ret;
920                         }
921                         break;
922
923                 case PIPE_AUTH_TYPE_SCHANNEL:
924                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
925                         if (!NT_STATUS_IS_OK(ret)) {
926                                 return ret;
927                         }
928                         break;
929
930                 case PIPE_AUTH_TYPE_KRB5:
931                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
932                 default:
933                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
934                                   "to %s - unknown internal auth type %u.\n",
935                                   rpccli_pipe_txt(debug_ctx(), cli),
936                                   cli->auth->auth_type ));
937                         return NT_STATUS_INVALID_INFO_CLASS;
938         }
939
940         return NT_STATUS_OK;
941 }
942
943 /****************************************************************************
944  Do basic authentication checks on an incoming pdu.
945  ****************************************************************************/
946
947 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
948                         prs_struct *current_pdu,
949                         uint8 expected_pkt_type,
950                         char **ppdata,
951                         uint32 *pdata_len,
952                         prs_struct *return_data)
953 {
954
955         NTSTATUS ret = NT_STATUS_OK;
956         uint32 current_pdu_len = prs_data_size(current_pdu);
957
958         if (current_pdu_len != prhdr->frag_len) {
959                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
960                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
961                 return NT_STATUS_INVALID_PARAMETER;
962         }
963
964         /*
965          * Point the return values at the real data including the RPC
966          * header. Just in case the caller wants it.
967          */
968         *ppdata = prs_data_p(current_pdu);
969         *pdata_len = current_pdu_len;
970
971         /* Ensure we have the correct type. */
972         switch (prhdr->pkt_type) {
973                 case RPC_ALTCONTRESP:
974                 case RPC_BINDACK:
975
976                         /* Alter context and bind ack share the same packet definitions. */
977                         break;
978
979
980                 case RPC_RESPONSE:
981                 {
982                         RPC_HDR_RESP rhdr_resp;
983                         uint8 ss_padding_len = 0;
984
985                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
986                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
987                                 return NT_STATUS_BUFFER_TOO_SMALL;
988                         }
989
990                         /* Here's where we deal with incoming sign/seal. */
991                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
992                                         current_pdu, &ss_padding_len);
993                         if (!NT_STATUS_IS_OK(ret)) {
994                                 return ret;
995                         }
996
997                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
998                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
999
1000                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
1001                                 return NT_STATUS_BUFFER_TOO_SMALL;
1002                         }
1003
1004                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
1005
1006                         /* Remember to remove the auth footer. */
1007                         if (prhdr->auth_len) {
1008                                 /* We've already done integer wrap tests on auth_len in
1009                                         cli_pipe_validate_rpc_response(). */
1010                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
1011                                         return NT_STATUS_BUFFER_TOO_SMALL;
1012                                 }
1013                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
1014                         }
1015
1016                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
1017                                 current_pdu_len, *pdata_len, ss_padding_len ));
1018
1019                         /*
1020                          * If this is the first reply, and the allocation hint is reasonable, try to
1021                          * set up the return_data parse_struct to the correct size.
1022                          */
1023
1024                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
1025                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
1026                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
1027                                                 "too large to allocate\n",
1028                                                 (unsigned int)rhdr_resp.alloc_hint ));
1029                                         return NT_STATUS_NO_MEMORY;
1030                                 }
1031                         }
1032
1033                         break;
1034                 }
1035
1036                 case RPC_BINDNACK:
1037                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
1038                                   "received from %s!\n",
1039                                   rpccli_pipe_txt(debug_ctx(), cli)));
1040                         /* Use this for now... */
1041                         return NT_STATUS_NETWORK_ACCESS_DENIED;
1042
1043                 case RPC_FAULT:
1044                 {
1045                         RPC_HDR_RESP rhdr_resp;
1046                         RPC_HDR_FAULT fault_resp;
1047
1048                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
1049                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
1050                                 return NT_STATUS_BUFFER_TOO_SMALL;
1051                         }
1052
1053                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
1054                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
1055                                 return NT_STATUS_BUFFER_TOO_SMALL;
1056                         }
1057
1058                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
1059                                   "code %s received from %s!\n",
1060                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
1061                                 rpccli_pipe_txt(debug_ctx(), cli)));
1062                         if (NT_STATUS_IS_OK(fault_resp.status)) {
1063                                 return NT_STATUS_UNSUCCESSFUL;
1064                         } else {
1065                                 return fault_resp.status;
1066                         }
1067                 }
1068
1069                 default:
1070                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
1071                                 "from %s!\n",
1072                                 (unsigned int)prhdr->pkt_type,
1073                                 rpccli_pipe_txt(debug_ctx(), cli)));
1074                         return NT_STATUS_INVALID_INFO_CLASS;
1075         }
1076
1077         if (prhdr->pkt_type != expected_pkt_type) {
1078                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
1079                           "got an unexpected RPC packet type - %u, not %u\n",
1080                         rpccli_pipe_txt(debug_ctx(), cli),
1081                         prhdr->pkt_type,
1082                         expected_pkt_type));
1083                 return NT_STATUS_INVALID_INFO_CLASS;
1084         }
1085
1086         /* Do this just before return - we don't want to modify any rpc header
1087            data before now as we may have needed to do cryptographic actions on
1088            it before. */
1089
1090         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
1091                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
1092                         "setting fragment first/last ON.\n"));
1093                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
1094         }
1095
1096         return NT_STATUS_OK;
1097 }
1098
1099 /****************************************************************************
1100  Ensure we eat the just processed pdu from the current_pdu prs_struct.
1101  Normally the frag_len and buffer size will match, but on the first trans
1102  reply there is a theoretical chance that buffer size > frag_len, so we must
1103  deal with that.
1104  ****************************************************************************/
1105
1106 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1107 {
1108         uint32 current_pdu_len = prs_data_size(current_pdu);
1109
1110         if (current_pdu_len < prhdr->frag_len) {
1111                 return NT_STATUS_BUFFER_TOO_SMALL;
1112         }
1113
1114         /* Common case. */
1115         if (current_pdu_len == (uint32)prhdr->frag_len) {
1116                 prs_mem_free(current_pdu);
1117                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1118                 /* Make current_pdu dynamic with no memory. */
1119                 prs_give_memory(current_pdu, 0, 0, True);
1120                 return NT_STATUS_OK;
1121         }
1122
1123         /*
1124          * Oh no ! More data in buffer than we processed in current pdu.
1125          * Cheat. Move the data down and shrink the buffer.
1126          */
1127
1128         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1129                         current_pdu_len - prhdr->frag_len);
1130
1131         /* Remember to set the read offset back to zero. */
1132         prs_set_offset(current_pdu, 0);
1133
1134         /* Shrink the buffer. */
1135         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1136                 return NT_STATUS_BUFFER_TOO_SMALL;
1137         }
1138
1139         return NT_STATUS_OK;
1140 }
1141
1142 /****************************************************************************
1143  Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1144 ****************************************************************************/
1145
1146 struct cli_api_pipe_state {
1147         struct event_context *ev;
1148         struct rpc_pipe_client *cli;
1149         uint32_t max_rdata_len;
1150         uint8_t *rdata;
1151         uint32_t rdata_len;
1152 };
1153
1154 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1155 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1156 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
1157
1158 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1159                                            struct event_context *ev,
1160                                            struct rpc_pipe_client *cli,
1161                                            uint8_t *data, size_t data_len,
1162                                            uint32_t max_rdata_len)
1163 {
1164         struct async_req *result, *subreq;
1165         struct cli_api_pipe_state *state;
1166         NTSTATUS status;
1167
1168         result = async_req_new(mem_ctx);
1169         if (result == NULL) {
1170                 return NULL;
1171         }
1172         state = talloc(result, struct cli_api_pipe_state);
1173         if (state == NULL) {
1174                 goto fail;
1175         }
1176         result->private_data = state;
1177
1178         state->ev = ev;
1179         state->cli = cli;
1180         state->max_rdata_len = max_rdata_len;
1181
1182         if (state->max_rdata_len < RPC_HEADER_LEN) {
1183                 /*
1184                  * For an RPC reply we always need at least RPC_HEADER_LEN
1185                  * bytes. We check this here because we will receive
1186                  * RPC_HEADER_LEN bytes in cli_api_pipe_sock_send_done.
1187                  */
1188                 status = NT_STATUS_INVALID_PARAMETER;
1189                 goto post_status;
1190         }
1191
1192         if (cli->transport_type == NCACN_NP) {
1193
1194                 uint16_t setup[2];
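                /* Trans setup words: setup[0] = transact subcommand,
                   setup[1] = the pipe's fnum. */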
1195                 SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
1196                 SSVAL(setup+1, 0, cli->trans.np.fnum);
1197
1198                 subreq = cli_trans_send(
1199                         state, ev, cli->trans.np.cli, SMBtrans,
1200                         "\\PIPE\\", 0, 0, 0, setup, 2, 0,
1201                         NULL, 0, 0, data, data_len, max_rdata_len);
1202                 if (subreq == NULL) {
1203                         status = NT_STATUS_NO_MEMORY;
1204                         goto post_status;
1205                 }
1206                 subreq->async.fn = cli_api_pipe_np_trans_done;
1207                 subreq->async.priv = result;
1208                 return result;
1209         }
1210
1211         if ((cli->transport_type == NCACN_IP_TCP)
1212             || (cli->transport_type == NCACN_UNIX_STREAM)) {
1213                 subreq = sendall_send(state, ev, cli->trans.sock.fd,
1214                                       data, data_len, 0);
1215                 if (subreq == NULL) {
1216                         status = NT_STATUS_NO_MEMORY;
1217                         goto post_status;
1218                 }
1219                 subreq->async.fn = cli_api_pipe_sock_send_done;
1220                 subreq->async.priv = result;
1221                 return result;
1222         }
1223
1224         status = NT_STATUS_INVALID_PARAMETER;
1225
1226  post_status:
1227         if (async_post_status(result, ev, status)) {
1228                 return result;
1229         }
1230  fail:
1231         TALLOC_FREE(result);
1232         return NULL;
1233 }
1234
1235 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1236 {
1237         struct async_req *req = talloc_get_type_abort(
1238                 subreq->async.priv, struct async_req);
1239         struct cli_api_pipe_state *state = talloc_get_type_abort(
1240                 req->private_data, struct cli_api_pipe_state);
1241         NTSTATUS status;
1242
1243         status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1244                                 &state->rdata, &state->rdata_len);
1245         TALLOC_FREE(subreq);
1246         if (!NT_STATUS_IS_OK(status)) {
1247                 async_req_error(req, status);
1248                 return;
1249         }
1250         async_req_done(req);
1251 }
1252
1253 static void cli_api_pipe_sock_send_done(struct async_req *subreq)
1254 {
1255         struct async_req *req = talloc_get_type_abort(
1256                 subreq->async.priv, struct async_req);
1257         struct cli_api_pipe_state *state = talloc_get_type_abort(
1258                 req->private_data, struct cli_api_pipe_state);
1259         NTSTATUS status;
1260
1261         status = sendall_recv(subreq);
1262         TALLOC_FREE(subreq);
1263         if (!NT_STATUS_IS_OK(status)) {
1264                 async_req_error(req, status);
1265                 return;
1266         }
1267
1268         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1269         if (async_req_nomem(state->rdata, req)) {
1270                 return;
1271         }
1272         state->rdata_len = RPC_HEADER_LEN;
1273
1274         subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
1275                               state->rdata, RPC_HEADER_LEN, 0);
1276         if (async_req_nomem(subreq, req)) {
1277                 return;
1278         }
1279         subreq->async.fn = cli_api_pipe_sock_read_done;
1280         subreq->async.priv = req;
1281 }
1282
1283 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1284 {
1285         struct async_req *req = talloc_get_type_abort(
1286                 subreq->async.priv, struct async_req);
1287         NTSTATUS status;
1288
1289         status = recvall_recv(subreq);
1290         TALLOC_FREE(subreq);
1291         if (!NT_STATUS_IS_OK(status)) {
1292                 async_req_error(req, status);
1293                 return;
1294         }
1295         async_req_done(req);
1296 }
1297
1298 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1299                                   uint8_t **prdata, uint32_t *prdata_len)
1300 {
1301         struct cli_api_pipe_state *state = talloc_get_type_abort(
1302                 req->private_data, struct cli_api_pipe_state);
1303         NTSTATUS status;
1304
1305         if (async_req_is_error(req, &status)) {
1306                 return status;
1307         }
1308
1309         *prdata = talloc_move(mem_ctx, &state->rdata);
1310         *prdata_len = state->rdata_len;
1311         return NT_STATUS_OK;
1312 }
1313
1314 /****************************************************************************
1315  Send data on an rpc pipe via trans. The prs_struct data must be the last
1316  pdu fragment of an NDR data stream.
1317
1318  Receive response data from an rpc pipe, which may be large...
1319
1320  Read the first fragment: unfortunately have to use SMBtrans for the first
1321  bit, then SMBreadX for subsequent bits.
1322
1323  If the first fragment received wasn't also the last fragment, continue
1324  getting fragments until we _do_ receive the last fragment.
1325
1326  Request/Response PDUs look like the following...
1327
1328  |<------------------PDU len----------------------------------------------->|
1329  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1330
1331  +------------+-----------------+-------------+---------------+-------------+
1332  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1333  +------------+-----------------+-------------+---------------+-------------+
1334
1335  Where the presence of the AUTH_HDR and AUTH DATA is dependent on the
1336  signing & sealing being negotiated.
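
 For a signed or sealed fragment this means the NDR payload length works out
 as frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len,
 which is exactly how data_len is computed in the verify routines above.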
1337
1338  ****************************************************************************/
1339
1340 struct rpc_api_pipe_state {
1341         struct event_context *ev;
1342         struct rpc_pipe_client *cli;
1343         uint8_t expected_pkt_type;
1344
1345         prs_struct incoming_frag;
1346         struct rpc_hdr_info rhdr;
1347
1348         prs_struct incoming_pdu;        /* Incoming reply */
1349         uint32_t incoming_pdu_offset;
1350 };
1351
1352 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1353 {
1354         prs_mem_free(&state->incoming_frag);
1355         prs_mem_free(&state->incoming_pdu);
1356         return 0;
1357 }
1358
1359 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1360 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1361
1362 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1363                                            struct event_context *ev,
1364                                            struct rpc_pipe_client *cli,
1365                                            prs_struct *data, /* Outgoing PDU */
1366                                            uint8_t expected_pkt_type)
1367 {
1368         struct async_req *result, *subreq;
1369         struct rpc_api_pipe_state *state;
1370         NTSTATUS status;
1371
1372         result = async_req_new(mem_ctx);
1373         if (result == NULL) {
1374                 return NULL;
1375         }
1376         state = talloc(result, struct rpc_api_pipe_state);
1377         if (state == NULL) {
1378                 goto fail;
1379         }
1380         result->private_data = state;
1381
1382         state->ev = ev;
1383         state->cli = cli;
1384         state->expected_pkt_type = expected_pkt_type;
1385         state->incoming_pdu_offset = 0;
1386
1387         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1388
1389         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1390         /* Make incoming_pdu dynamic with no memory. */
1391         prs_give_memory(&state->incoming_pdu, 0, 0, true);
1392
1393         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1394
1395         /*
1396          * Ensure we're not sending too much.
1397          */
1398         if (prs_offset(data) > cli->max_xmit_frag) {
1399                 status = NT_STATUS_INVALID_PARAMETER;
1400                 goto post_status;
1401         }
1402
1403         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1404
1405         subreq = cli_api_pipe_send(state, ev, cli,
1406                                    (uint8_t *)prs_data_p(data),
1407                                    prs_offset(data), cli->max_recv_frag);
1408         if (subreq == NULL) {
1409                 status = NT_STATUS_NO_MEMORY;
1410                 goto post_status;
1411         }
1412         subreq->async.fn = rpc_api_pipe_trans_done;
1413         subreq->async.priv = result;
1414         return result;
1415
1416  post_status:
1417         if (async_post_status(result, ev, status)) {
1418                 return result;
1419         }
1420  fail:
1421         TALLOC_FREE(result);
1422         return NULL;
1423 }
1424
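/*
 * The initial trans/read against the pipe has returned. Copy the reply
 * bytes into incoming_frag and make sure we hold a complete fragment
 * before trying to parse it.
 */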
1425 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1426 {
1427         struct async_req *req = talloc_get_type_abort(
1428                 subreq->async.priv, struct async_req);
1429         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1430                 req->private_data, struct rpc_api_pipe_state);
1431         NTSTATUS status;
1432         uint8_t *rdata = NULL;
1433         uint32_t rdata_len = 0;
1434         char *rdata_copy;
1435
1436         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1437         TALLOC_FREE(subreq);
1438         if (!NT_STATUS_IS_OK(status)) {
1439                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1440                 async_req_error(req, status);
1441                 return;
1442         }
1443
1444         if (rdata == NULL) {
1445                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1446                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1447                 async_req_done(req);
1448                 return;
1449         }
1450
1451         /*
1452          * Give the memory received from cli_trans as dynamic to the current
1453          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1454          * :-(
1455          */
1456         rdata_copy = (char *)memdup(rdata, rdata_len);
1457         TALLOC_FREE(rdata);
1458         if (async_req_nomem(rdata_copy, req)) {
1459                 return;
1460         }
1461         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1462
1463         /* Ensure we have enough data for a pdu. */
1464         subreq = get_complete_frag_send(state, state->ev, state->cli,
1465                                         &state->rhdr, &state->incoming_frag);
1466         if (async_req_nomem(subreq, req)) {
1467                 return;
1468         }
1469         subreq->async.fn = rpc_api_pipe_got_pdu;
1470         subreq->async.priv = req;
1471 }
1472
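/*
 * A complete fragment is available in incoming_frag. Validate it, append
 * its data portion to incoming_pdu and either finish (last fragment) or
 * fetch the next fragment.
 */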
1473 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1474 {
1475         struct async_req *req = talloc_get_type_abort(
1476                 subreq->async.priv, struct async_req);
1477         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1478                 req->private_data, struct rpc_api_pipe_state);
1479         NTSTATUS status;
1480         char *rdata = NULL;
1481         uint32_t rdata_len = 0;
1482
1483         status = get_complete_frag_recv(subreq);
1484         TALLOC_FREE(subreq);
1485         if (!NT_STATUS_IS_OK(status)) {
1486                 DEBUG(5, ("get_complete_frag failed: %s\n",
1487                           nt_errstr(status)));
1488                 async_req_error(req, status);
1489                 return;
1490         }
1491
1492         status = cli_pipe_validate_current_pdu(
1493                 state->cli, &state->rhdr, &state->incoming_frag,
1494                 state->expected_pkt_type, &rdata, &rdata_len,
1495                 &state->incoming_pdu);
1496
1497         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1498                   (unsigned)prs_data_size(&state->incoming_frag),
1499                   (unsigned)state->incoming_pdu_offset,
1500                   nt_errstr(status)));
1501
1502         if (!NT_STATUS_IS_OK(status)) {
1503                 async_req_error(req, status);
1504                 return;
1505         }
1506
1507         if ((state->rhdr.flags & RPC_FLG_FIRST)
1508             && (state->rhdr.pack_type[0] == 0)) {
1509                 /*
1510                  * Set the data type correctly for big-endian data on the
1511                  * first packet.
1512                  */
1513                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1514                           "big-endian.\n",
1515                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1516                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1517         }
1518         /*
1519          * Check endianness on subsequent packets.
1520          */
1521         if (state->incoming_frag.bigendian_data
1522             != state->incoming_pdu.bigendian_data) {
1523                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1524                          "%s\n",
1525                          state->incoming_pdu.bigendian_data?"big":"little",
1526                          state->incoming_frag.bigendian_data?"big":"little"));
1527                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1528                 return;
1529         }
1530
1531         /* Now copy the data portion out of the fragment into the current pdu. */
1532         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1533                 async_req_error(req, NT_STATUS_NO_MEMORY);
1534                 return;
1535         }
1536
1537         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1538                rdata, (size_t)rdata_len);
1539         state->incoming_pdu_offset += rdata_len;
1540
1541         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1542                                             &state->incoming_frag);
1543         if (!NT_STATUS_IS_OK(status)) {
1544                 async_req_error(req, status);
1545                 return;
1546         }
1547
1548         if (state->rhdr.flags & RPC_FLG_LAST) {
1549                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1550                           rpccli_pipe_txt(debug_ctx(), state->cli),
1551                           (unsigned)prs_data_size(&state->incoming_pdu)));
1552                 async_req_done(req);
1553                 return;
1554         }
1555
1556         subreq = get_complete_frag_send(state, state->ev, state->cli,
1557                                         &state->rhdr, &state->incoming_frag);
1558         if (async_req_nomem(subreq, req)) {
1559                 return;
1560         }
1561         subreq->async.fn = rpc_api_pipe_got_pdu;
1562         subreq->async.priv = req;
1563 }
1564
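/*
 * Hand the assembled reply PDU over to the caller, moving ownership of the
 * underlying memory to mem_ctx.
 */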
1565 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1566                                   prs_struct *reply_pdu)
1567 {
1568         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1569                 req->private_data, struct rpc_api_pipe_state);
1570         NTSTATUS status;
1571
1572         if (async_req_is_error(req, &status)) {
1573                 return status;
1574         }
1575
1576         *reply_pdu = state->incoming_pdu;
1577         reply_pdu->mem_ctx = mem_ctx;
1578
1579         /*
1580          * Prevent state->incoming_pdu from being freed in
1581          * rpc_api_pipe_state_destructor()
1582          */
1583         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1584
1585         return NT_STATUS_OK;
1586 }
1587
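/*
 * Synchronous wrapper around rpc_api_pipe_send()/rpc_api_pipe_recv() using
 * a private event context.
 */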
1588 static NTSTATUS rpc_api_pipe(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
1589                              prs_struct *data, /* Outgoing pdu fragment,
1590                                                 * already formatted for
1591                                                 * send. */
1592                              prs_struct *rbuf, /* Incoming reply - return as
1593                                                 * an NDR stream. */
1594                              uint8 expected_pkt_type)
1595 {
1596         TALLOC_CTX *frame = talloc_stackframe();
1597         struct event_context *ev;
1598         struct async_req *req;
1599         NTSTATUS status = NT_STATUS_NO_MEMORY;
1600
1601         ev = event_context_init(frame);
1602         if (ev == NULL) {
1603                 goto fail;
1604         }
1605
1606         req = rpc_api_pipe_send(frame, ev, cli, data, expected_pkt_type);
1607         if (req == NULL) {
1608                 goto fail;
1609         }
1610
1611         while (req->state < ASYNC_REQ_DONE) {
1612                 event_loop_once(ev);
1613         }
1614
1615         status = rpc_api_pipe_recv(req, mem_ctx, rbuf);
1616  fail:
1617         TALLOC_FREE(frame);
1618         return status;
1619 }
1620
1621 /*******************************************************************
1622  Creates krb5 auth bind.
1623  ********************************************************************/
1624
1625 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1626                                                 enum pipe_auth_level auth_level,
1627                                                 RPC_HDR_AUTH *pauth_out,
1628                                                 prs_struct *auth_data)
1629 {
1630 #ifdef HAVE_KRB5
1631         int ret;
1632         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1633         DATA_BLOB tkt = data_blob_null;
1634         DATA_BLOB tkt_wrapped = data_blob_null;
1635
1636         /* We may change the pad length before marshalling. */
1637         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1638
1639         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1640                 a->service_principal ));
1641
1642         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1643
1644         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1645                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1646
1647         if (ret) {
1648                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1649                         "failed with %s\n",
1650                         a->service_principal,
1651                         error_message(ret) ));
1652
1653                 data_blob_free(&tkt);
1654                 prs_mem_free(auth_data);
1655                 return NT_STATUS_INVALID_PARAMETER;
1656         }
1657
1658         /* wrap that up in a nice GSS-API wrapping */
1659         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1660
1661         data_blob_free(&tkt);
1662
1663         /* Auth len in the rpc header doesn't include auth_header. */
1664         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1665                 data_blob_free(&tkt_wrapped);
1666                 prs_mem_free(auth_data);
1667                 return NT_STATUS_NO_MEMORY;
1668         }
1669
1670         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1671         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1672
1673         data_blob_free(&tkt_wrapped);
1674         return NT_STATUS_OK;
1675 #else
1676         return NT_STATUS_INVALID_PARAMETER;
1677 #endif
1678 }
1679
1680 /*******************************************************************
1681  Creates SPNEGO NTLMSSP auth bind.
1682  ********************************************************************/
1683
1684 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1685                                                 enum pipe_auth_level auth_level,
1686                                                 RPC_HDR_AUTH *pauth_out,
1687                                                 prs_struct *auth_data)
1688 {
1689         NTSTATUS nt_status;
1690         DATA_BLOB null_blob = data_blob_null;
1691         DATA_BLOB request = data_blob_null;
1692         DATA_BLOB spnego_msg = data_blob_null;
1693
1694         /* We may change the pad length before marshalling. */
1695         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1696
1697         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1698         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1699                                         null_blob,
1700                                         &request);
1701
1702         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1703                 data_blob_free(&request);
1704                 prs_mem_free(auth_data);
1705                 return nt_status;
1706         }
1707
1708         /* Wrap this in SPNEGO. */
1709         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1710
1711         data_blob_free(&request);
1712
1713         /* Auth len in the rpc header doesn't include auth_header. */
1714         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1715                 data_blob_free(&spnego_msg);
1716                 prs_mem_free(auth_data);
1717                 return NT_STATUS_NO_MEMORY;
1718         }
1719
1720         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1721         dump_data(5, spnego_msg.data, spnego_msg.length);
1722
1723         data_blob_free(&spnego_msg);
1724         return NT_STATUS_OK;
1725 }
1726
1727 /*******************************************************************
1728  Creates NTLMSSP auth bind.
1729  ********************************************************************/
1730
1731 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1732                                                 enum pipe_auth_level auth_level,
1733                                                 RPC_HDR_AUTH *pauth_out,
1734                                                 prs_struct *auth_data)
1735 {
1736         NTSTATUS nt_status;
1737         DATA_BLOB null_blob = data_blob_null;
1738         DATA_BLOB request = data_blob_null;
1739
1740         /* We may change the pad length before marshalling. */
1741         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1742
1743         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1744         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1745                                         null_blob,
1746                                         &request);
1747
1748         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1749                 data_blob_free(&request);
1750                 prs_mem_free(auth_data);
1751                 return nt_status;
1752         }
1753
1754         /* Auth len in the rpc header doesn't include auth_header. */
1755         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1756                 data_blob_free(&request);
1757                 prs_mem_free(auth_data);
1758                 return NT_STATUS_NO_MEMORY;
1759         }
1760
1761         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1762         dump_data(5, request.data, request.length);
1763
1764         data_blob_free(&request);
1765         return NT_STATUS_OK;
1766 }
1767
1768 /*******************************************************************
1769  Creates schannel auth bind.
1770  ********************************************************************/
1771
1772 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1773                                                 enum pipe_auth_level auth_level,
1774                                                 RPC_HDR_AUTH *pauth_out,
1775                                                 prs_struct *auth_data)
1776 {
1777         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1778
1779         /* We may change the pad length before marshalling. */
1780         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1781
1782         /* Use lp_workgroup() if domain not specified */
1783
1784         if (!cli->auth->domain || !cli->auth->domain[0]) {
1785                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1786                 if (cli->auth->domain == NULL) {
1787                         return NT_STATUS_NO_MEMORY;
1788                 }
1789         }
1790
1791         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1792                                    global_myname());
1793
1794         /*
1795          * Now marshall the data into the auth parse_struct.
1796          */
1797
1798         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1799                                        &schannel_neg, auth_data, 0)) {
1800                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1801                 prs_mem_free(auth_data);
1802                 return NT_STATUS_NO_MEMORY;
1803         }
1804
1805         return NT_STATUS_OK;
1806 }
1807
1808 /*******************************************************************
1809  Creates the internals of a DCE/RPC bind request or alter context PDU.
1810  ********************************************************************/
1811
1812 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1813                                                 prs_struct *rpc_out, 
1814                                                 uint32 rpc_call_id,
1815                                                 const RPC_IFACE *abstract,
1816                                                 const RPC_IFACE *transfer,
1817                                                 RPC_HDR_AUTH *phdr_auth,
1818                                                 prs_struct *pauth_info)
1819 {
1820         RPC_HDR hdr;
1821         RPC_HDR_RB hdr_rb;
1822         RPC_CONTEXT rpc_ctx;
1823         uint16 auth_len = prs_offset(pauth_info);
1824         uint8 ss_padding_len = 0;
1825         uint16 frag_len = 0;
1826
1827         /* create the RPC context. */
1828         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1829
1830         /* create the bind request RPC_HDR_RB */
1831         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1832
1833         /* Start building the frag length. */
1834         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1835
1836         /* Do we need to pad ? */
1837         if (auth_len) {
1838                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1839                 if (data_len % 8) {
1840                         ss_padding_len = 8 - (data_len % 8);
1841                         phdr_auth->auth_pad_len = ss_padding_len;
1842                 }
1843                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1844         }
1845
1846         /* Create the request RPC_HDR */
1847         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1848
1849         /* Marshall the RPC header */
1850         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1851                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1852                 return NT_STATUS_NO_MEMORY;
1853         }
1854
1855         /* Marshall the bind request data */
1856         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1857                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1858                 return NT_STATUS_NO_MEMORY;
1859         }
1860
1861         /*
1862          * Grow the outgoing buffer to store any auth info.
1863          */
1864
1865         if(auth_len != 0) {
1866                 if (ss_padding_len) {
1867                         char pad[8];
1868                         memset(pad, '\0', 8);
1869                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1870                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1871                                 return NT_STATUS_NO_MEMORY;
1872                         }
1873                 }
1874
1875                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1876                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1877                         return NT_STATUS_NO_MEMORY;
1878                 }
1879
1880
1881                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1882                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1883                         return NT_STATUS_NO_MEMORY;
1884                 }
1885         }
1886
1887         return NT_STATUS_OK;
1888 }
1889
1890 /*******************************************************************
1891  Creates a DCE/RPC bind request.
1892  ********************************************************************/
1893
1894 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1895                                 prs_struct *rpc_out, 
1896                                 uint32 rpc_call_id,
1897                                 const RPC_IFACE *abstract,
1898                                 const RPC_IFACE *transfer,
1899                                 enum pipe_auth_type auth_type,
1900                                 enum pipe_auth_level auth_level)
1901 {
1902         RPC_HDR_AUTH hdr_auth;
1903         prs_struct auth_info;
1904         NTSTATUS ret = NT_STATUS_OK;
1905
1906         ZERO_STRUCT(hdr_auth);
1907         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1908                 return NT_STATUS_NO_MEMORY;
1909
1910         switch (auth_type) {
1911                 case PIPE_AUTH_TYPE_SCHANNEL:
1912                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1913                         if (!NT_STATUS_IS_OK(ret)) {
1914                                 prs_mem_free(&auth_info);
1915                                 return ret;
1916                         }
1917                         break;
1918
1919                 case PIPE_AUTH_TYPE_NTLMSSP:
1920                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1921                         if (!NT_STATUS_IS_OK(ret)) {
1922                                 prs_mem_free(&auth_info);
1923                                 return ret;
1924                         }
1925                         break;
1926
1927                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1928                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1929                         if (!NT_STATUS_IS_OK(ret)) {
1930                                 prs_mem_free(&auth_info);
1931                                 return ret;
1932                         }
1933                         break;
1934
1935                 case PIPE_AUTH_TYPE_KRB5:
1936                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1937                         if (!NT_STATUS_IS_OK(ret)) {
1938                                 prs_mem_free(&auth_info);
1939                                 return ret;
1940                         }
1941                         break;
1942
1943                 case PIPE_AUTH_TYPE_NONE:
1944                         break;
1945
1946                 default:
1947                         /* "Can't" happen. */
1948                         return NT_STATUS_INVALID_INFO_CLASS;
1949         }
1950
1951         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1952                                                 rpc_out, 
1953                                                 rpc_call_id,
1954                                                 abstract,
1955                                                 transfer,
1956                                                 &hdr_auth,
1957                                                 &auth_info);
1958
1959         prs_mem_free(&auth_info);
1960         return ret;
1961 }
1962
1963 /*******************************************************************
1964  Create and add the NTLMSSP sign/seal auth header and data.
1965  ********************************************************************/
1966
1967 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1968                                         RPC_HDR *phdr,
1969                                         uint32 ss_padding_len,
1970                                         prs_struct *outgoing_pdu)
1971 {
1972         RPC_HDR_AUTH auth_info;
1973         NTSTATUS status;
1974         DATA_BLOB auth_blob = data_blob_null;
1975         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1976
1977         if (!cli->auth->a_u.ntlmssp_state) {
1978                 return NT_STATUS_INVALID_PARAMETER;
1979         }
1980
1981         /* Init and marshall the auth header. */
1982         init_rpc_hdr_auth(&auth_info,
1983                         map_pipe_auth_type_to_rpc_auth_type(
1984                                 cli->auth->auth_type),
1985                         cli->auth->auth_level,
1986                         ss_padding_len,
1987                         1 /* context id. */);
1988
1989         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1990                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1991                 data_blob_free(&auth_blob);
1992                 return NT_STATUS_NO_MEMORY;
1993         }
1994
1995         switch (cli->auth->auth_level) {
1996                 case PIPE_AUTH_LEVEL_PRIVACY:
1997                         /* Data portion is encrypted. */
1998                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1999                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
2000                                         data_and_pad_len,
2001                                         (unsigned char *)prs_data_p(outgoing_pdu),
2002                                         (size_t)prs_offset(outgoing_pdu),
2003                                         &auth_blob);
2004                         if (!NT_STATUS_IS_OK(status)) {
2005                                 data_blob_free(&auth_blob);
2006                                 return status;
2007                         }
2008                         break;
2009
2010                 case PIPE_AUTH_LEVEL_INTEGRITY:
2011                         /* Data is signed. */
2012                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
2013                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
2014                                         data_and_pad_len,
2015                                         (unsigned char *)prs_data_p(outgoing_pdu),
2016                                         (size_t)prs_offset(outgoing_pdu),
2017                                         &auth_blob);
2018                         if (!NT_STATUS_IS_OK(status)) {
2019                                 data_blob_free(&auth_blob);
2020                                 return status;
2021                         }
2022                         break;
2023
2024                 default:
2025                         /* Can't happen. */
2026                         smb_panic("bad auth level");
2027                         /* Notreached. */
2028                         return NT_STATUS_INVALID_PARAMETER;
2029         }
2030
2031         /* Finally marshall the blob. */
2032
2033         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
2034                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
2035                         (unsigned int)NTLMSSP_SIG_SIZE));
2036                 data_blob_free(&auth_blob);
2037                 return NT_STATUS_NO_MEMORY;
2038         }
2039
2040         data_blob_free(&auth_blob);
2041         return NT_STATUS_OK;
2042 }
2043
2044 /*******************************************************************
2045  Create and add the schannel sign/seal auth header and data.
2046  ********************************************************************/
2047
2048 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
2049                                         RPC_HDR *phdr,
2050                                         uint32 ss_padding_len,
2051                                         prs_struct *outgoing_pdu)
2052 {
2053         RPC_HDR_AUTH auth_info;
2054         RPC_AUTH_SCHANNEL_CHK verf;
2055         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
2056         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
2057         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
2058
2059         if (!sas) {
2060                 return NT_STATUS_INVALID_PARAMETER;
2061         }
2062
2063         /* Init and marshall the auth header. */
2064         init_rpc_hdr_auth(&auth_info,
2065                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2066                         cli->auth->auth_level,
2067                         ss_padding_len,
2068                         1 /* context id. */);
2069
2070         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2071                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2072                 return NT_STATUS_NO_MEMORY;
2073         }
2074
2075         switch (cli->auth->auth_level) {
2076                 case PIPE_AUTH_LEVEL_PRIVACY:
2077                 case PIPE_AUTH_LEVEL_INTEGRITY:
2078                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
2079                                 sas->seq_num));
2080
2081                         schannel_encode(sas,
2082                                         cli->auth->auth_level,
2083                                         SENDER_IS_INITIATOR,
2084                                         &verf,
2085                                         data_p,
2086                                         data_and_pad_len);
2087
2088                         sas->seq_num++;
2089                         break;
2090
2091                 default:
2092                         /* Can't happen. */
2093                         smb_panic("bad auth level");
2094                         /* Notreached. */
2095                         return NT_STATUS_INVALID_PARAMETER;
2096         }
2097
2098         /* Finally marshall the blob. */
2099         smb_io_rpc_auth_schannel_chk("",
2100                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2101                         &verf,
2102                         outgoing_pdu,
2103                         0);
2104
2105         return NT_STATUS_OK;
2106 }
2107
2108 /*******************************************************************
2109  Calculate how much data we're going to send in this packet, also
2110  work out any sign/seal padding length.
2111  ********************************************************************/
2112
2113 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
2114                                         uint32 data_left,
2115                                         uint16 *p_frag_len,
2116                                         uint16 *p_auth_len,
2117                                         uint32 *p_ss_padding)
2118 {
2119         uint32 data_space, data_len;
2120
2121         switch (cli->auth->auth_level) {
2122                 case PIPE_AUTH_LEVEL_NONE:
2123                 case PIPE_AUTH_LEVEL_CONNECT:
2124                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
2125                         data_len = MIN(data_space, data_left);
2126                         *p_ss_padding = 0;
2127                         *p_auth_len = 0;
2128                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
2129                         return data_len;
2130
2131                 case PIPE_AUTH_LEVEL_INTEGRITY:
2132                 case PIPE_AUTH_LEVEL_PRIVACY:
2133                         /* Treat the same for all authenticated rpc requests. */
2134                         switch(cli->auth->auth_type) {
2135                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2136                                 case PIPE_AUTH_TYPE_NTLMSSP:
2137                                         *p_auth_len = NTLMSSP_SIG_SIZE;
2138                                         break;
2139                                 case PIPE_AUTH_TYPE_SCHANNEL:
2140                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2141                                         break;
2142                                 default:
2143                                         smb_panic("bad auth type");
2144                                         break;
2145                         }
2146
2147                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2148                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
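                        /*
                         * For example, with the default 4280 byte fragment
                         * size and NTLMSSP signing (16 byte RPC header, 8
                         * byte request header, 8 byte auth header, 16 byte
                         * signature) this leaves 4232 bytes of payload space
                         * per fragment.
                         */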
2149
2150                         data_len = MIN(data_space, data_left);
2151                         *p_ss_padding = 0;
2152                         if (data_len % 8) {
2153                                 *p_ss_padding = 8 - (data_len % 8);
2154                         }
2155                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2156                                         data_len + *p_ss_padding +              /* data plus padding. */
2157                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2158                         return data_len;
2159
2160                 default:
2161                         smb_panic("bad auth level");
2162                         /* Notreached. */
2163                         return 0;
2164         }
2165 }
2166
2167 /*******************************************************************
2168  External interface.
2169  Does an rpc request on a pipe. The request is NDR encoded in in_data and
2170  the reply is returned NDR encoded in out_data. Splits the data stream into
2171  RPC PDUs and deals with signing/sealing details.
2172  ********************************************************************/
2173
2174 struct rpc_api_pipe_req_state {
2175         struct event_context *ev;
2176         struct rpc_pipe_client *cli;
2177         uint8_t op_num;
2178         uint32_t call_id;
2179         prs_struct *req_data;
2180         uint32_t req_data_sent;
2181         prs_struct outgoing_frag;
2182         prs_struct reply_pdu;
2183 };
2184
2185 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2186 {
2187         prs_mem_free(&s->outgoing_frag);
2188         prs_mem_free(&s->reply_pdu);
2189         return 0;
2190 }
2191
2192 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2193 static void rpc_api_pipe_req_done(struct async_req *subreq);
2194 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2195                                   bool *is_last_frag);
2196
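/*
 * Async part of rpc_api_pipe_req(): marshall req_data into max_xmit_frag
 * sized request fragments, write all but the last with rpc_write_send() and
 * send the last one through rpc_api_pipe_send() to collect the response.
 */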
2197 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2198                                         struct event_context *ev,
2199                                         struct rpc_pipe_client *cli,
2200                                         uint8_t op_num,
2201                                         prs_struct *req_data)
2202 {
2203         struct async_req *result, *subreq;
2204         struct rpc_api_pipe_req_state *state;
2205         NTSTATUS status;
2206         bool is_last_frag;
2207
2208         result = async_req_new(mem_ctx);
2209         if (result == NULL) {
2210                 return NULL;
2211         }
2212         state = talloc(result, struct rpc_api_pipe_req_state);
2213         if (state == NULL) {
2214                 goto fail;
2215         }
2216         result->private_data = state;
2217
2218         state->ev = ev;
2219         state->cli = cli;
2220         state->op_num = op_num;
2221         state->req_data = req_data;
2222         state->req_data_sent = 0;
2223         state->call_id = get_rpc_call_id();
2224
2225         if (cli->max_xmit_frag
2226             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2227                 /* Server is screwed up ! */
2228                 status = NT_STATUS_INVALID_PARAMETER;
2229                 goto post_status;
2230         }
2231
2232         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2233
2234         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2235                       state, MARSHALL)) {
2236                 status = NT_STATUS_NO_MEMORY;
2237                 goto post_status;
2238         }
2239
2240         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2241
2242         status = prepare_next_frag(state, &is_last_frag);
2243         if (!NT_STATUS_IS_OK(status)) {
2244                 goto post_status;
2245         }
2246
2247         if (is_last_frag) {
2248                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2249                                            &state->outgoing_frag,
2250                                            RPC_RESPONSE);
2251                 if (subreq == NULL) {
2252                         status = NT_STATUS_NO_MEMORY;
2253                         goto post_status;
2254                 }
2255                 subreq->async.fn = rpc_api_pipe_req_done;
2256                 subreq->async.priv = result;
2257         } else {
2258                 subreq = rpc_write_send(state, ev, cli,
2259                                         prs_data_p(&state->outgoing_frag),
2260                                         prs_offset(&state->outgoing_frag));
2261                 if (subreq == NULL) {
2262                         status = NT_STATUS_NO_MEMORY;
2263                         goto post_status;
2264                 }
2265                 subreq->async.fn = rpc_api_pipe_req_write_done;
2266                 subreq->async.priv = result;
2267         }
2268         return result;
2269
2270  post_status:
2271         if (async_post_status(result, ev, status)) {
2272                 return result;
2273         }
2274  fail:
2275         TALLOC_FREE(result);
2276         return NULL;
2277 }
2278
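/*
 * Marshall the next request fragment into state->outgoing_frag: RPC header,
 * request header, the next slice of req_data, any sign/seal padding and the
 * auth footer. *is_last_frag is set when this fragment carries RPC_FLG_LAST.
 */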
2279 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2280                                   bool *is_last_frag)
2281 {
2282         RPC_HDR hdr;
2283         RPC_HDR_REQ hdr_req;
2284         uint32_t data_sent_thistime;
2285         uint16_t auth_len;
2286         uint16_t frag_len;
2287         uint8_t flags = 0;
2288         uint32_t ss_padding;
2289         uint32_t data_left;
2290         char pad[8] = { 0, };
2291         NTSTATUS status;
2292
2293         data_left = prs_offset(state->req_data) - state->req_data_sent;
2294
2295         data_sent_thistime = calculate_data_len_tosend(
2296                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2297
2298         if (state->req_data_sent == 0) {
2299                 flags = RPC_FLG_FIRST;
2300         }
2301
2302         if (data_sent_thistime == data_left) {
2303                 flags |= RPC_FLG_LAST;
2304         }
2305
2306         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2307                 return NT_STATUS_NO_MEMORY;
2308         }
2309
2310         /* Create and marshall the header and request header. */
2311         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2312                      auth_len);
2313
2314         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2315                 return NT_STATUS_NO_MEMORY;
2316         }
2317
2318         /* Create the rpc request RPC_HDR_REQ */
2319         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2320                          state->op_num);
2321
2322         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2323                                 &state->outgoing_frag, 0)) {
2324                 return NT_STATUS_NO_MEMORY;
2325         }
2326
2327         /* Copy in the data, plus any ss padding. */
2328         if (!prs_append_some_prs_data(&state->outgoing_frag,
2329                                       state->req_data, state->req_data_sent,
2330                                       data_sent_thistime)) {
2331                 return NT_STATUS_NO_MEMORY;
2332         }
2333
2334         /* Copy the sign/seal padding data. */
2335         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2336                 return NT_STATUS_NO_MEMORY;
2337         }
2338
2339         /* Generate any auth sign/seal and add the auth footer. */
2340         switch (state->cli->auth->auth_type) {
2341         case PIPE_AUTH_TYPE_NONE:
2342                 status = NT_STATUS_OK;
2343                 break;
2344         case PIPE_AUTH_TYPE_NTLMSSP:
2345         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2346                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2347                                                  &state->outgoing_frag);
2348                 break;
2349         case PIPE_AUTH_TYPE_SCHANNEL:
2350                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2351                                                   &state->outgoing_frag);
2352                 break;
2353         default:
2354                 status = NT_STATUS_INVALID_PARAMETER;
2355                 break;
2356         }
2357
2358         state->req_data_sent += data_sent_thistime;
2359         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2360
2361         return status;
2362 }
2363
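/*
 * A non-final fragment has been written. Prepare the next one and either
 * write it as well or, for the last fragment, send it via rpc_api_pipe_send()
 * so the response is collected too.
 */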
2364 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2365 {
2366         struct async_req *req = talloc_get_type_abort(
2367                 subreq->async.priv, struct async_req);
2368         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2369                 req->private_data, struct rpc_api_pipe_req_state);
2370         NTSTATUS status;
2371         bool is_last_frag;
2372
2373         status = rpc_write_recv(subreq);
2374         TALLOC_FREE(subreq);
2375         if (!NT_STATUS_IS_OK(status)) {
2376                 async_req_error(req, status);
2377                 return;
2378         }
2379
2380         status = prepare_next_frag(state, &is_last_frag);
2381         if (!NT_STATUS_IS_OK(status)) {
2382                 async_req_error(req, status);
2383                 return;
2384         }
2385
2386         if (is_last_frag) {
2387                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2388                                            &state->outgoing_frag,
2389                                            RPC_RESPONSE);
2390                 if (async_req_nomem(subreq, req)) {
2391                         return;
2392                 }
2393                 subreq->async.fn = rpc_api_pipe_req_done;
2394                 subreq->async.priv = req;
2395         } else {
2396                 subreq = rpc_write_send(state, state->ev, state->cli,
2397                                         prs_data_p(&state->outgoing_frag),
2398                                         prs_offset(&state->outgoing_frag));
2399                 if (async_req_nomem(subreq, req)) {
2400                         return;
2401                 }
2402                 subreq->async.fn = rpc_api_pipe_req_write_done;
2403                 subreq->async.priv = req;
2404         }
2405 }
2406
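/*
 * The last fragment went out through rpc_api_pipe_send(); pick up the
 * assembled reply PDU.
 */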
2407 static void rpc_api_pipe_req_done(struct async_req *subreq)
2408 {
2409         struct async_req *req = talloc_get_type_abort(
2410                 subreq->async.priv, struct async_req);
2411         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2412                 req->private_data, struct rpc_api_pipe_req_state);
2413         NTSTATUS status;
2414
2415         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2416         TALLOC_FREE(subreq);
2417         if (!NT_STATUS_IS_OK(status)) {
2418                 async_req_error(req, status);
2419                 return;
2420         }
2421         async_req_done(req);
2422 }
2423
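/*
 * Hand the reply PDU over to the caller, moving ownership of the underlying
 * memory to mem_ctx.
 */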
2424 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2425                                prs_struct *reply_pdu)
2426 {
2427         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2428                 req->private_data, struct rpc_api_pipe_req_state);
2429         NTSTATUS status;
2430
2431         if (async_req_is_error(req, &status)) {
2432                 return status;
2433         }
2434
2435         *reply_pdu = state->reply_pdu;
2436         reply_pdu->mem_ctx = mem_ctx;
2437
2438         /*
2439          * Prevent state->reply_pdu from being freed in
2440          * rpc_api_pipe_req_state_destructor()
2441          */
2442         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2443
2444         return NT_STATUS_OK;
2445 }
2446
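/*
 * Synchronous wrapper around rpc_api_pipe_req_send()/rpc_api_pipe_req_recv()
 * using a private event context.
 */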
2447 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2448                         uint8 op_num,
2449                         prs_struct *in_data,
2450                         prs_struct *out_data)
2451 {
2452         TALLOC_CTX *frame = talloc_stackframe();
2453         struct event_context *ev;
2454         struct async_req *req;
2455         NTSTATUS status = NT_STATUS_NO_MEMORY;
2456
2457         ev = event_context_init(frame);
2458         if (ev == NULL) {
2459                 goto fail;
2460         }
2461
2462         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2463         if (req == NULL) {
2464                 goto fail;
2465         }
2466
2467         while (req->state < ASYNC_REQ_DONE) {
2468                 event_loop_once(ev);
2469         }
2470
2471         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2472  fail:
2473         TALLOC_FREE(frame);
2474         return status;
2475 }
2476
2477 #if 0
2478 /****************************************************************************
2479  Set the handle state.
2480 ****************************************************************************/
2481
2482 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2483                                    const char *pipe_name, uint16 device_state)
2484 {
2485         bool state_set = False;
2486         char param[2];
2487         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2488         char *rparam = NULL;
2489         char *rdata = NULL;
2490         uint32 rparam_len, rdata_len;
2491
2492         if (pipe_name == NULL)
2493                 return False;
2494
2495         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2496                  cli->fnum, pipe_name, device_state));
2497
2498         /* create parameters: device state */
2499         SSVAL(param, 0, device_state);
2500
2501         /* create setup parameters. */
2502         setup[0] = 0x0001; 
2503         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2504
2505         /* send the data on \PIPE\ */
2506         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2507                     setup, 2, 0,                /* setup, length, max */
2508                     param, 2, 0,                /* param, length, max */
2509                     NULL, 0, 1024,              /* data, length, max */
2510                     &rparam, &rparam_len,        /* return param, length */
2511                     &rdata, &rdata_len))         /* return data, length */
2512         {
2513                 DEBUG(5, ("Set Handle state: return OK\n"));
2514                 state_set = True;
2515         }
2516
2517         SAFE_FREE(rparam);
2518         SAFE_FREE(rdata);
2519
2520         return state_set;
2521 }
2522 #endif
2523
2524 /****************************************************************************
2525  Check the rpc bind acknowledge response.
2526 ****************************************************************************/
2527
2528 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2529 {
2530         if ( hdr_ba->addr.len == 0) {
2531                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2532         }
2533
2534         /* check the transfer syntax */
2535         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2536              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2537                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2538                 return False;
2539         }
2540
2541         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2542                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2543                           hdr_ba->res.num_results, hdr_ba->res.reason));
2544         }
2545
2546         DEBUG(5,("check_bind_response: accepted!\n"));
2547         return True;
2548 }
2549
2550 /*******************************************************************
2551  Creates a DCE/RPC bind authentication response.
2552  This is the packet that is sent back to the server once we
2553  have received a BIND-ACK, to finish the third leg of
2554  the authentication handshake.
2555  ********************************************************************/
2556
2557 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2558                                 uint32 rpc_call_id,
2559                                 enum pipe_auth_type auth_type,
2560                                 enum pipe_auth_level auth_level,
2561                                 DATA_BLOB *pauth_blob,
2562                                 prs_struct *rpc_out)
2563 {
2564         RPC_HDR hdr;
2565         RPC_HDR_AUTH hdr_auth;
2566         uint32 pad = 0;
2567
2568         /* Create the request RPC_HDR */
2569         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2570                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2571                      pauth_blob->length );
2572
2573         /* Marshall it. */
2574         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2575                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2576                 return NT_STATUS_NO_MEMORY;
2577         }
2578
2579         /*
2580                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2581                 about padding - shouldn't this pad to length 8 ? JRA.
2582         */
2583
2584         /* 4 bytes padding. */
2585         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2586                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2587                 return NT_STATUS_NO_MEMORY;
2588         }
2589
2590         /* Create the request RPC_HDR_AUTH */
2591         init_rpc_hdr_auth(&hdr_auth,
2592                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2593                         auth_level, 0, 1);
2594
2595         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2596                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2597                 return NT_STATUS_NO_MEMORY;
2598         }
2599
2600         /*
2601          * Append the auth data to the outgoing buffer.
2602          */
2603
2604         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2605                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2606                 return NT_STATUS_NO_MEMORY;
2607         }
2608
2609         return NT_STATUS_OK;
2610 }
2611
2612 /****************************************************************************
2613  Create and send the third packet in an RPC auth.
2614 ****************************************************************************/
2615
2616 static NTSTATUS rpc_finish_auth3_bind(struct rpc_pipe_client *cli,
2617                                 RPC_HDR *phdr,
2618                                 prs_struct *rbuf,
2619                                 uint32 rpc_call_id,
2620                                 enum pipe_auth_type auth_type,
2621                                 enum pipe_auth_level auth_level)
2622 {
2623         DATA_BLOB server_response = data_blob_null;
2624         DATA_BLOB client_reply = data_blob_null;
2625         RPC_HDR_AUTH hdr_auth;
2626         NTSTATUS nt_status;
2627         prs_struct rpc_out;
2628         ssize_t ret;
2629
2630         if (!phdr->auth_len || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2631                 return NT_STATUS_INVALID_PARAMETER;
2632         }
2633
2634         /* Process the returned NTLMSSP blob first. */
2635         if (!prs_set_offset(rbuf, phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2636                 return NT_STATUS_INVALID_PARAMETER;
2637         }
2638
2639         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rbuf, 0)) {
2640                 return NT_STATUS_INVALID_PARAMETER;
2641         }
2642
2643         /* TODO - check auth_type/auth_level match. */
2644
2645         server_response = data_blob(NULL, phdr->auth_len);
2646         prs_copy_data_out((char *)server_response.data, rbuf, phdr->auth_len);
2647
2648         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
2649                                    server_response,
2650                                    &client_reply);
2651
2652         if (!NT_STATUS_IS_OK(nt_status)) {
2653                 DEBUG(0,("rpc_finish_auth3_bind: NTLMSSP update using server blob failed.\n"));
2654                 data_blob_free(&server_response);
2655                 return nt_status;
2656         }
2657
2658         prs_init_empty(&rpc_out, prs_get_mem_context(rbuf), MARSHALL);
2659
2660         nt_status = create_rpc_bind_auth3(cli, rpc_call_id,
2661                                 auth_type, auth_level,
2662                                 &client_reply, &rpc_out);
2663
2664         if (!NT_STATUS_IS_OK(nt_status)) {
2665                 prs_mem_free(&rpc_out);
2666                 data_blob_free(&client_reply);
2667                 data_blob_free(&server_response);
2668                 return nt_status;
2669         }
2670
2671         switch (cli->transport_type) {
2672         case NCACN_NP:
2673                 /* 8 here is named pipe message mode. */
2674                 ret = cli_write(cli->trans.np.cli, cli->trans.np.fnum,
2675                                 0x8, prs_data_p(&rpc_out), 0,
2676                                 (size_t)prs_offset(&rpc_out));
2677
2678                 if (ret != (ssize_t)prs_offset(&rpc_out)) {
2679                         nt_status = cli_get_nt_error(cli->trans.np.cli);
2680                 }
2681                 break;
2682         case NCACN_IP_TCP:
2683         case NCACN_UNIX_STREAM:
2684                 ret = write_data(cli->trans.sock.fd, prs_data_p(&rpc_out),
2685                                  (size_t)prs_offset(&rpc_out));
2686                 if (ret != (ssize_t)prs_offset(&rpc_out)) {
2687                         nt_status = map_nt_error_from_unix(errno);
2688                 }
2689                 break;
2690         default:
2691                 DEBUG(0, ("unknown transport type %d\n", cli->transport_type));
2692                 return NT_STATUS_INTERNAL_ERROR;
2693         }
2694
2695         if (ret != (ssize_t)prs_offset(&rpc_out)) {
2696                 DEBUG(0,("rpc_finish_auth3_bind: write failed. Return was %s\n",
2697                          nt_errstr(nt_status)));
2698                 prs_mem_free(&rpc_out);
2699                 data_blob_free(&client_reply);
2700                 data_blob_free(&server_response);
2701                 return nt_status;
2702         }
2703
2704         DEBUG(5,("rpc_finish_auth3_bind: %s sent auth3 response ok.\n",
2705                  rpccli_pipe_txt(debug_ctx(), cli)));
2706
2707         prs_mem_free(&rpc_out);
2708         data_blob_free(&client_reply);
2709         data_blob_free(&server_response);
2710         return NT_STATUS_OK;
2711 }
2712
2713 /*******************************************************************
2714  Creates a DCE/RPC bind alter context authentication request which
2715  may contain a spnego auth blob.
2716  ********************************************************************/
2717
2718 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2719                                         const RPC_IFACE *abstract,
2720                                         const RPC_IFACE *transfer,
2721                                         enum pipe_auth_level auth_level,
2722                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2723                                         prs_struct *rpc_out)
2724 {
2725         RPC_HDR_AUTH hdr_auth;
2726         prs_struct auth_info;
2727         NTSTATUS ret = NT_STATUS_OK;
2728
2729         ZERO_STRUCT(hdr_auth);
2730         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2731                 return NT_STATUS_NO_MEMORY;
2732
2733         /* We may change the pad length before marshalling. */
2734         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2735
2736         if (pauth_blob->length) {
2737                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2738                         prs_mem_free(&auth_info);
2739                         return NT_STATUS_NO_MEMORY;
2740                 }
2741         }
2742
2743         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2744                                                 rpc_out, 
2745                                                 rpc_call_id,
2746                                                 abstract,
2747                                                 transfer,
2748                                                 &hdr_auth,
2749                                                 &auth_info);
2750         prs_mem_free(&auth_info);
2751         return ret;
2752 }
2753
2754 /*******************************************************************
2755  Third leg of the SPNEGO bind mechanism - sends alter context PDU
2756  and gets a response.
2757  ********************************************************************/
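/*
 * Editor's note (summarising the code below): the SPNEGO-wrapped NTLMSSP
 * bind needs a fourth leg. After the BIND/BIND_ACK exchange this helper
 * unwraps the server's SPNEGO challenge (spnego_parse_challenge), feeds the
 * inner NTLMSSP blob to ntlmssp_update(), wraps the reply again
 * (spnego_gen_auth), sends it in an ALTER_CONTEXT PDU, and finally checks
 * the SPNEGO token returned in the ALTER_CONTEXT_RESP via
 * spnego_parse_auth_response().
 */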
2758
2759 static NTSTATUS rpc_finish_spnego_ntlmssp_bind(struct rpc_pipe_client *cli,
2760                                 RPC_HDR *phdr,
2761                                 prs_struct *rbuf,
2762                                 uint32 rpc_call_id,
2763                                 const RPC_IFACE *abstract,
2764                                 const RPC_IFACE *transfer,
2765                                 enum pipe_auth_type auth_type,
2766                                 enum pipe_auth_level auth_level)
2767 {
2768         DATA_BLOB server_spnego_response = data_blob_null;
2769         DATA_BLOB server_ntlm_response = data_blob_null;
2770         DATA_BLOB client_reply = data_blob_null;
2771         DATA_BLOB tmp_blob = data_blob_null;
2772         RPC_HDR_AUTH hdr_auth;
2773         NTSTATUS nt_status;
2774         prs_struct rpc_out;
2775
2776         if (!phdr->auth_len || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2777                 return NT_STATUS_INVALID_PARAMETER;
2778         }
2779
2780         /* Process the returned NTLMSSP blob first. */
2781         if (!prs_set_offset(rbuf, phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2782                 return NT_STATUS_INVALID_PARAMETER;
2783         }
2784
2785         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rbuf, 0)) {
2786                 return NT_STATUS_INVALID_PARAMETER;
2787         }
2788
2789         server_spnego_response = data_blob(NULL, phdr->auth_len);
2790         prs_copy_data_out((char *)server_spnego_response.data, rbuf, phdr->auth_len);
2791
2792         /* The server might give us back two challenges - tmp_blob is for the second. */
2793         if (!spnego_parse_challenge(server_spnego_response, &server_ntlm_response, &tmp_blob)) {
2794                 data_blob_free(&server_spnego_response);
2795                 data_blob_free(&server_ntlm_response);
2796                 data_blob_free(&tmp_blob);
2797                 return NT_STATUS_INVALID_PARAMETER;
2798         }
2799
2800         /* We're finished with the server spnego response and the tmp_blob. */
2801         data_blob_free(&server_spnego_response);
2802         data_blob_free(&tmp_blob);
2803
2804         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
2805                                    server_ntlm_response,
2806                                    &client_reply);
2807
2808         /* Finished with the server_ntlm response */
2809         data_blob_free(&server_ntlm_response);
2810
2811         if (!NT_STATUS_IS_OK(nt_status)) {
2812                 DEBUG(0,("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update using server blob failed.\n"));
2813                 data_blob_free(&client_reply);
2814                 return nt_status;
2815         }
2816
2817         /* SPNEGO wrap the client reply. */
2818         tmp_blob = spnego_gen_auth(client_reply);
2819         data_blob_free(&client_reply);
2820         client_reply = tmp_blob;
2821         tmp_blob = data_blob_null; /* Ensure it's safe to free this just in case. */
2822
2823         /* Now prepare the alter context pdu. */
2824         prs_init_empty(&rpc_out, prs_get_mem_context(rbuf), MARSHALL);
2825
2826         nt_status = create_rpc_alter_context(rpc_call_id,
2827                                                 abstract,
2828                                                 transfer,
2829                                                 auth_level,
2830                                                 &client_reply,
2831                                                 &rpc_out);
2832
2833         data_blob_free(&client_reply);
2834
2835         if (!NT_STATUS_IS_OK(nt_status)) {
2836                 prs_mem_free(&rpc_out);
2837                 return nt_status;
2838         }
2839
2840         /* Free the old reply buffer before rpc_api_pipe() refills it. */
2841         prs_mem_free(rbuf);
2842
2843         nt_status = rpc_api_pipe(talloc_tos(), cli, &rpc_out, rbuf,
2844                                  RPC_ALTCONTRESP);
2845         prs_mem_free(&rpc_out);
2846         if (!NT_STATUS_IS_OK(nt_status)) {
2847                 return nt_status;
2848         }
2849
2850         /* Get the auth blob from the reply. */
2851         if(!smb_io_rpc_hdr("rpc_hdr   ", phdr, rbuf, 0)) {
2852                 DEBUG(0,("rpc_finish_spnego_ntlmssp_bind: Failed to unmarshall RPC_HDR.\n"));
2853                 return NT_STATUS_BUFFER_TOO_SMALL;
2854         }
2855
2856         if (!prs_set_offset(rbuf, phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2857                 return NT_STATUS_INVALID_PARAMETER;
2858         }
2859
2860         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rbuf, 0)) {
2861                 return NT_STATUS_INVALID_PARAMETER;
2862         }
2863
2864         server_spnego_response = data_blob(NULL, phdr->auth_len);
2865         prs_copy_data_out((char *)server_spnego_response.data, rbuf, phdr->auth_len);
2866
2867         /* Check we got a valid auth response. */
2868         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK, OID_NTLMSSP, &tmp_blob)) {
2869                 data_blob_free(&server_spnego_response);
2870                 data_blob_free(&tmp_blob);
2871                 return NT_STATUS_INVALID_PARAMETER;
2872         }
2873
2874         data_blob_free(&server_spnego_response);
2875         data_blob_free(&tmp_blob);
2876
2877         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2878                  "%s.\n", rpccli_pipe_txt(debug_ctx(), cli)));
2879
2880         return NT_STATUS_OK;
2881 }
2882
2883 /****************************************************************************
2884  Do an rpc bind.
2885 ****************************************************************************/
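/*
 * Minimal caller sketch (editorial; mirrors rpc_pipe_get_tcp_port() further
 * down). The pipe must already be open, and the auth struct is talloc-moved
 * onto the pipe by rpc_pipe_bind() itself:
 *
 *	struct cli_pipe_auth_data *auth;
 *	status = rpccli_anon_bind_data(talloc_tos(), &auth);
 *	if (NT_STATUS_IS_OK(status)) {
 *		status = rpc_pipe_bind(pipe_cli, auth);
 *	}
 */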
2886
2887 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2888                        struct cli_pipe_auth_data *auth)
2889 {
2890         RPC_HDR hdr;
2891         RPC_HDR_BA hdr_ba;
2892         prs_struct rpc_out;
2893         prs_struct rbuf;
2894         uint32 rpc_call_id;
2895         NTSTATUS status;
2896
2897         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2898                 rpccli_pipe_txt(debug_ctx(), cli),
2899                 (unsigned int)auth->auth_type,
2900                 (unsigned int)auth->auth_level ));
2901
2902         cli->auth = talloc_move(cli, &auth);
2903
2904         prs_init_empty(&rpc_out, talloc_tos(), MARSHALL);
2905
2906         rpc_call_id = get_rpc_call_id();
2907
2908         /* Marshall the outgoing data. */
2909         status = create_rpc_bind_req(cli, &rpc_out, rpc_call_id,
2910                                 &cli->abstract_syntax,
2911                                 &cli->transfer_syntax,
2912                                 cli->auth->auth_type,
2913                                 cli->auth->auth_level);
2914
2915         if (!NT_STATUS_IS_OK(status)) {
2916                 prs_mem_free(&rpc_out);
2917                 return status;
2918         }
2919
2920         /* send data on \PIPE\.  receive a response */
2921         status = rpc_api_pipe(talloc_tos(), cli, &rpc_out, &rbuf, RPC_BINDACK);
2922         prs_mem_free(&rpc_out);
2923         if (!NT_STATUS_IS_OK(status)) {
2924                 return status;
2925         }
2926
2927         DEBUG(3,("rpc_pipe_bind: %s bind request returned ok.\n",
2928                  rpccli_pipe_txt(debug_ctx(), cli)));
2929
2930         /* Unmarshall the RPC header */
2931         if(!smb_io_rpc_hdr("hdr"   , &hdr, &rbuf, 0)) {
2932                 DEBUG(0,("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2933                 prs_mem_free(&rbuf);
2934                 return NT_STATUS_BUFFER_TOO_SMALL;
2935         }
2936
2937         if(!smb_io_rpc_hdr_ba("", &hdr_ba, &rbuf, 0)) {
2938                 DEBUG(0,("rpc_pipe_bind: Failed to unmarshall RPC_HDR_BA.\n"));
2939                 prs_mem_free(&rbuf);
2940                 return NT_STATUS_BUFFER_TOO_SMALL;
2941         }
2942
2943         if(!check_bind_response(&hdr_ba, &cli->transfer_syntax)) {
2944                 DEBUG(2,("rpc_pipe_bind: check_bind_response failed.\n"));
2945                 prs_mem_free(&rbuf);
2946                 return NT_STATUS_BUFFER_TOO_SMALL;
2947         }
2948
2949         cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2950         cli->max_recv_frag = hdr_ba.bba.max_rsize;
2951
2952         /* For authenticated binds we may need to do 3 or 4 leg binds. */
2953         switch(cli->auth->auth_type) {
2954
2955                 case PIPE_AUTH_TYPE_NONE:
2956                 case PIPE_AUTH_TYPE_SCHANNEL:
2957                         /* Bind complete. */
2958                         break;
2959
2960                 case PIPE_AUTH_TYPE_NTLMSSP:
2961                         /* Need to send AUTH3 packet - no reply. */
2962                         status = rpc_finish_auth3_bind(
2963                                 cli, &hdr, &rbuf, rpc_call_id,
2964                                 cli->auth->auth_type,
2965                                 cli->auth->auth_level);
2966                         if (!NT_STATUS_IS_OK(status)) {
2967                                 prs_mem_free(&rbuf);
2968                                 return status;
2969                         }
2970                         break;
2971
2972                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2973                         /* Need to send alter context request and reply. */
2974                         status = rpc_finish_spnego_ntlmssp_bind(
2975                                 cli, &hdr, &rbuf, rpc_call_id,
2976                                 &cli->abstract_syntax, &cli->transfer_syntax,
2977                                 cli->auth->auth_type, cli->auth->auth_level);
2978                         if (!NT_STATUS_IS_OK(status)) {
2979                                 prs_mem_free(&rbuf);
2980                                 return status;
2981                         }
2982                         break;
2983
2984                 case PIPE_AUTH_TYPE_KRB5:
2985                         /* Krb5 binds are not handled here yet - fall through to the error below. */
2986
2987                 default:
2988                         DEBUG(0,("cli_finish_bind_auth: unknown auth type "
2989                                  "%u\n", (unsigned int)cli->auth->auth_type));
2990                         prs_mem_free(&rbuf);
2991                         return NT_STATUS_INVALID_INFO_CLASS;
2992         }
2993
2994         /* For NTLMSSP ensure the server gave us the auth_level we wanted. */
2995         if (cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP
2996             || cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP) {
2997                 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
2998                         if (!(cli->auth->a_u.ntlmssp_state->neg_flags & NTLMSSP_NEGOTIATE_SIGN)) {
2999                                 DEBUG(0,("cli_finish_bind_auth: requested NTLMSSP signing and server refused.\n"));
3000                                 prs_mem_free(&rbuf);
3001                                 return NT_STATUS_INVALID_PARAMETER;
3002                         }
3003                 }
3004                 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3005                         if (!(cli->auth->a_u.ntlmssp_state->neg_flags & NTLMSSP_NEGOTIATE_SEAL)) {
3006                                 DEBUG(0,("cli_finish_bind_auth: requested NTLMSSP sealing and server refused.\n"));
3007                                 prs_mem_free(&rbuf);
3008                                 return NT_STATUS_INVALID_PARAMETER;
3009                         }
3010                 }
3011         }
3012
3013         prs_mem_free(&rbuf);
3014         return NT_STATUS_OK;
3015 }
3016
3017 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3018                                 unsigned int timeout)
3019 {
3020         return cli_set_timeout(cli->trans.np.cli, timeout);
3021 }
3022
3023 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3024 {
3025         if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3026             || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3027                 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3028                 return true;
3029         }
3030
3031         if (cli->transport_type == NCACN_NP) {
3032                 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
3033                 return true;
3034         }
3035
3036         return false;
3037 }
3038
3039 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3040 {
3041         if (p->transport_type == NCACN_NP) {
3042                 return p->trans.np.cli;
3043         }
3044         return NULL;
3045 }
3046
3047 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3048 {
3049         if (p->transport_type == NCACN_NP) {
3050                 bool ret;
3051                 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3052                 if (!ret) {
3053                         DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3054                                   "pipe %s. Error was %s\n",
3055                                   rpccli_pipe_txt(debug_ctx(), p),
3056                                   cli_errstr(p->trans.np.cli)));
3057                 }
3058
3059                 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3060                            rpccli_pipe_txt(debug_ctx(), p)));
3061
3062                 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3063                 return ret ? -1 : 0;
3064         }
3065
3066         return -1;
3067 }
3068
3069 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3070                                struct cli_pipe_auth_data **presult)
3071 {
3072         struct cli_pipe_auth_data *result;
3073
3074         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3075         if (result == NULL) {
3076                 return NT_STATUS_NO_MEMORY;
3077         }
3078
3079         result->auth_type = PIPE_AUTH_TYPE_NONE;
3080         result->auth_level = PIPE_AUTH_LEVEL_NONE;
3081
3082         result->user_name = talloc_strdup(result, "");
3083         result->domain = talloc_strdup(result, "");
3084         if ((result->user_name == NULL) || (result->domain == NULL)) {
3085                 TALLOC_FREE(result);
3086                 return NT_STATUS_NO_MEMORY;
3087         }
3088
3089         *presult = result;
3090         return NT_STATUS_OK;
3091 }
3092
3093 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3094 {
3095         ntlmssp_end(&auth->a_u.ntlmssp_state);
3096         return 0;
3097 }
3098
3099 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3100                                   enum pipe_auth_type auth_type,
3101                                   enum pipe_auth_level auth_level,
3102                                   const char *domain,
3103                                   const char *username,
3104                                   const char *password,
3105                                   struct cli_pipe_auth_data **presult)
3106 {
3107         struct cli_pipe_auth_data *result;
3108         NTSTATUS status;
3109
3110         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3111         if (result == NULL) {
3112                 return NT_STATUS_NO_MEMORY;
3113         }
3114
3115         result->auth_type = auth_type;
3116         result->auth_level = auth_level;
3117
3118         result->user_name = talloc_strdup(result, username);
3119         result->domain = talloc_strdup(result, domain);
3120         if ((result->user_name == NULL) || (result->domain == NULL)) {
3121                 status = NT_STATUS_NO_MEMORY;
3122                 goto fail;
3123         }
3124
3125         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3126         if (!NT_STATUS_IS_OK(status)) {
3127                 goto fail;
3128         }
3129
3130         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3131
3132         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3133         if (!NT_STATUS_IS_OK(status)) {
3134                 goto fail;
3135         }
3136
3137         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3138         if (!NT_STATUS_IS_OK(status)) {
3139                 goto fail;
3140         }
3141
3142         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3143         if (!NT_STATUS_IS_OK(status)) {
3144                 goto fail;
3145         }
3146
3147         /*
3148          * Turn off sign+seal to allow selected auth level to turn it back on.
3149          */
3150         result->a_u.ntlmssp_state->neg_flags &=
3151                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3152
3153         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3154                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3155         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3156                 result->a_u.ntlmssp_state->neg_flags
3157                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3158         }
3159
3160         *presult = result;
3161         return NT_STATUS_OK;
3162
3163  fail:
3164         TALLOC_FREE(result);
3165         return status;
3166 }
3167
3168 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3169                                    enum pipe_auth_level auth_level,
3170                                    const uint8_t sess_key[16],
3171                                    struct cli_pipe_auth_data **presult)
3172 {
3173         struct cli_pipe_auth_data *result;
3174
3175         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3176         if (result == NULL) {
3177                 return NT_STATUS_NO_MEMORY;
3178         }
3179
3180         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3181         result->auth_level = auth_level;
3182
3183         result->user_name = talloc_strdup(result, "");
3184         result->domain = talloc_strdup(result, domain);
3185         if ((result->user_name == NULL) || (result->domain == NULL)) {
3186                 goto fail;
3187         }
3188
3189         result->a_u.schannel_auth = talloc(result,
3190                                            struct schannel_auth_struct);
3191         if (result->a_u.schannel_auth == NULL) {
3192                 goto fail;
3193         }
3194
3195         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3196                sizeof(result->a_u.schannel_auth->sess_key));
3197         result->a_u.schannel_auth->seq_num = 0;
3198
3199         *presult = result;
3200         return NT_STATUS_OK;
3201
3202  fail:
3203         TALLOC_FREE(result);
3204         return NT_STATUS_NO_MEMORY;
3205 }
3206
3207 #ifdef HAVE_KRB5
3208 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3209 {
3210         data_blob_free(&auth->session_key);
3211         return 0;
3212 }
3213 #endif
3214
3215 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3216                                    enum pipe_auth_level auth_level,
3217                                    const char *service_princ,
3218                                    const char *username,
3219                                    const char *password,
3220                                    struct cli_pipe_auth_data **presult)
3221 {
3222 #ifdef HAVE_KRB5
3223         struct cli_pipe_auth_data *result;
3224
3225         if ((username != NULL) && (password != NULL)) {
3226                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3227                 if (ret != 0) {
3228                         return NT_STATUS_ACCESS_DENIED;
3229                 }
3230         }
3231
3232         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3233         if (result == NULL) {
3234                 return NT_STATUS_NO_MEMORY;
3235         }
3236
3237         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3238         result->auth_level = auth_level;
3239
3240         /*
3241          * Username / domain need fixing!
3242          */
3243         result->user_name = talloc_strdup(result, "");
3244         result->domain = talloc_strdup(result, "");
3245         if ((result->user_name == NULL) || (result->domain == NULL)) {
3246                 goto fail;
3247         }
3248
3249         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3250                 result, struct kerberos_auth_struct);
3251         if (result->a_u.kerberos_auth == NULL) {
3252                 goto fail;
3253         }
3254         talloc_set_destructor(result->a_u.kerberos_auth,
3255                               cli_auth_kerberos_data_destructor);
3256
3257         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3258                 result, service_princ);
3259         if (result->a_u.kerberos_auth->service_principal == NULL) {
3260                 goto fail;
3261         }
3262
3263         *presult = result;
3264         return NT_STATUS_OK;
3265
3266  fail:
3267         TALLOC_FREE(result);
3268         return NT_STATUS_NO_MEMORY;
3269 #else
3270         return NT_STATUS_NOT_SUPPORTED;
3271 #endif
3272 }
3273
3274 static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
3275 {
3276         close(p->trans.sock.fd);
3277         return 0;
3278 }
3279
3280 /**
3281  * Create an rpc pipe client struct, connecting to a tcp port.
3282  */
3283 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3284                                        uint16_t port,
3285                                        const struct ndr_syntax_id *abstract_syntax,
3286                                        struct rpc_pipe_client **presult)
3287 {
3288         struct rpc_pipe_client *result;
3289         struct sockaddr_storage addr;
3290         NTSTATUS status;
3291
3292         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3293         if (result == NULL) {
3294                 return NT_STATUS_NO_MEMORY;
3295         }
3296
3297         result->transport_type = NCACN_IP_TCP;
3298
3299         result->abstract_syntax = *abstract_syntax;
3300         result->transfer_syntax = ndr_transfer_syntax;
3301
3302         result->desthost = talloc_strdup(result, host);
3303         result->srv_name_slash = talloc_asprintf_strupper_m(
3304                 result, "\\\\%s", result->desthost);
3305         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3306                 status = NT_STATUS_NO_MEMORY;
3307                 goto fail;
3308         }
3309
3310         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3311         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3312
3313         if (!resolve_name(host, &addr, 0)) {
3314                 status = NT_STATUS_NOT_FOUND;
3315                 goto fail;
3316         }
3317
3318         status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3319         if (!NT_STATUS_IS_OK(status)) {
3320                 goto fail;
3321         }
3322
3323         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3324
3325         *presult = result;
3326         return NT_STATUS_OK;
3327
3328  fail:
3329         TALLOC_FREE(result);
3330         return status;
3331 }
3332
3333 /**
3334  * Determine the tcp port on which a dcerpc interface is listening
3335  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3336  * target host.
3337  */
3338 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3339                                       const struct ndr_syntax_id *abstract_syntax,
3340                                       uint16_t *pport)
3341 {
3342         NTSTATUS status;
3343         struct rpc_pipe_client *epm_pipe = NULL;
3344         struct cli_pipe_auth_data *auth = NULL;
3345         struct dcerpc_binding *map_binding = NULL;
3346         struct dcerpc_binding *res_binding = NULL;
3347         struct epm_twr_t *map_tower = NULL;
3348         struct epm_twr_t *res_towers = NULL;
3349         struct policy_handle *entry_handle = NULL;
3350         uint32_t num_towers = 0;
3351         uint32_t max_towers = 1;
3352         struct epm_twr_p_t towers;
3353         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3354
3355         if (pport == NULL) {
3356                 status = NT_STATUS_INVALID_PARAMETER;
3357                 goto done;
3358         }
3359
3360         /* open the connection to the endpoint mapper */
3361         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3362                                         &ndr_table_epmapper.syntax_id,
3363                                         &epm_pipe);
3364
3365         if (!NT_STATUS_IS_OK(status)) {
3366                 goto done;
3367         }
3368
3369         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3370         if (!NT_STATUS_IS_OK(status)) {
3371                 goto done;
3372         }
3373
3374         status = rpc_pipe_bind(epm_pipe, auth);
3375         if (!NT_STATUS_IS_OK(status)) {
3376                 goto done;
3377         }
3378
3379         /* create tower for asking the epmapper */
3380
3381         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3382         if (map_binding == NULL) {
3383                 status = NT_STATUS_NO_MEMORY;
3384                 goto done;
3385         }
3386
3387         map_binding->transport = NCACN_IP_TCP;
3388         map_binding->object = *abstract_syntax;
3389         map_binding->host = host; /* needed? */
3390         map_binding->endpoint = "0"; /* correct? needed? */
3391
3392         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3393         if (map_tower == NULL) {
3394                 status = NT_STATUS_NO_MEMORY;
3395                 goto done;
3396         }
3397
3398         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3399                                             &(map_tower->tower));
3400         if (!NT_STATUS_IS_OK(status)) {
3401                 goto done;
3402         }
3403
3404         /* allocate further parameters for the epm_Map call */
3405
3406         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3407         if (res_towers == NULL) {
3408                 status = NT_STATUS_NO_MEMORY;
3409                 goto done;
3410         }
3411         towers.twr = res_towers;
3412
3413         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3414         if (entry_handle == NULL) {
3415                 status = NT_STATUS_NO_MEMORY;
3416                 goto done;
3417         }
3418
3419         /* ask the endpoint mapper for the port */
3420
3421         status = rpccli_epm_Map(epm_pipe,
3422                                 tmp_ctx,
3423                                 CONST_DISCARD(struct GUID *,
3424                                               &(abstract_syntax->uuid)),
3425                                 map_tower,
3426                                 entry_handle,
3427                                 max_towers,
3428                                 &num_towers,
3429                                 &towers);
3430
3431         if (!NT_STATUS_IS_OK(status)) {
3432                 goto done;
3433         }
3434
3435         if (num_towers != 1) {
3436                 status = NT_STATUS_UNSUCCESSFUL;
3437                 goto done;
3438         }
3439
3440         /* extract the port from the answer */
3441
3442         status = dcerpc_binding_from_tower(tmp_ctx,
3443                                            &(towers.twr->tower),
3444                                            &res_binding);
3445         if (!NT_STATUS_IS_OK(status)) {
3446                 goto done;
3447         }
3448
3449         /* are further checks here necessary? */
3450         if (res_binding->transport != NCACN_IP_TCP) {
3451                 status = NT_STATUS_UNSUCCESSFUL;
3452                 goto done;
3453         }
3454
3455         *pport = (uint16_t)atoi(res_binding->endpoint);
3456
3457 done:
3458         TALLOC_FREE(tmp_ctx);
3459         return status;
3460 }
3461
3462 /**
3463  * Create an rpc pipe client struct, connecting to a host via tcp.
3464  * The port is determined by asking the endpoint mapper on the given
3465  * host.
3466  */
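/*
 * Illustrative call (editorial example; "dc01" is a placeholder host name):
 *
 *	struct rpc_pipe_client *p = NULL;
 *	status = rpc_pipe_open_tcp(talloc_tos(), "dc01",
 *				   &ndr_table_drsuapi.syntax_id, &p);
 *
 * This is essentially what cli_rpc_pipe_open() below does for the drsuapi
 * interface.
 */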
3467 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3468                            const struct ndr_syntax_id *abstract_syntax,
3469                            struct rpc_pipe_client **presult)
3470 {
3471         NTSTATUS status;
3472         uint16_t port = 0;
3473
3474         *presult = NULL;
3475
3476         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3477         if (!NT_STATUS_IS_OK(status)) {
3478                 goto done;
3479         }
3480
3481         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3482                                         abstract_syntax, presult);
3483
3484 done:
3485         return status;
3486 }
3487
3488 /********************************************************************
3489  Create an rpc pipe client struct, connecting to a unix domain socket.
3490  ********************************************************************/
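/*
 * Illustrative call (editorial example; the socket path and interface are
 * placeholders, not values configured anywhere in this file):
 *
 *	struct rpc_pipe_client *p = NULL;
 *	status = rpc_pipe_open_ncalrpc(talloc_tos(), "/tmp/rpc.sock",
 *				       &ndr_table_lsarpc.syntax_id, &p);
 */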
3491 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3492                                const struct ndr_syntax_id *abstract_syntax,
3493                                struct rpc_pipe_client **presult)
3494 {
3495         struct rpc_pipe_client *result;
3496         struct sockaddr_un addr;
3497         NTSTATUS status;
3498
3499         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3500         if (result == NULL) {
3501                 return NT_STATUS_NO_MEMORY;
3502         }
3503
3504         result->transport_type = NCACN_UNIX_STREAM;
3505
3506         result->abstract_syntax = *abstract_syntax;
3507         result->transfer_syntax = ndr_transfer_syntax;
3508
3509         result->desthost = talloc_get_myname(result);
3510         result->srv_name_slash = talloc_asprintf_strupper_m(
3511                 result, "\\\\%s", result->desthost);
3512         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3513                 status = NT_STATUS_NO_MEMORY;
3514                 goto fail;
3515         }
3516
3517         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3518         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3519
3520         result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3521         if (result->trans.sock.fd == -1) {
3522                 status = map_nt_error_from_unix(errno);
3523                 goto fail;
3524         }
3525
3526         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3527
3528         ZERO_STRUCT(addr);
3529         addr.sun_family = AF_UNIX;
3530         strlcpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3531
3532         if (sys_connect(result->trans.sock.fd,
3533                         (struct sockaddr *)&addr) == -1) {
3534                 status = map_nt_error_from_unix(errno);
3535                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3536                           strerror(errno)));
3537                 goto fail;
3538         }
3539
3540         *presult = result;
3541         return NT_STATUS_OK;
3542
3543  fail:
3544         TALLOC_FREE(result);
3545         return status;
3546 }
3547
3548
3549 /****************************************************************************
3550  Open a named pipe over SMB to a remote server.
3551  *
3552  * CAVEAT CALLER OF THIS FUNCTION:
3553  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3554  *    so be sure that this function is called AFTER any structure (vs pointer)
3555  *    assignment of the cli.  In particular, libsmbclient does structure
3556  *    assignments of cli, which invalidates the data in the returned
3557  *    rpc_pipe_client if this function is called before the structure assignment
3558  *    of cli.
3559  * 
3560  ****************************************************************************/
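/*
 * Editorial sketch of the caveat above (hypothetical call ordering):
 *
 *	// wrong: pipe opened first, cli structure-assigned afterwards
 *	rpc_pipe_open_np(cli, iface, &p);
 *	*cli = other_cli;		// invalidates data saved in p
 *
 *	// right: finish all structure assignments of cli, then open
 *	*cli = other_cli;
 *	rpc_pipe_open_np(cli, iface, &p);
 */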
3561
3562 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3563                                  const struct ndr_syntax_id *abstract_syntax,
3564                                  struct rpc_pipe_client **presult)
3565 {
3566         struct rpc_pipe_client *result;
3567         int fnum;
3568
3569         /* sanity check to protect against crashes */
3570
3571         if ( !cli ) {
3572                 return NT_STATUS_INVALID_HANDLE;
3573         }
3574
3575         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3576         if (result == NULL) {
3577                 return NT_STATUS_NO_MEMORY;
3578         }
3579
3580         result->transport_type = NCACN_NP;
3581
3582         result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
3583                 result, cli, abstract_syntax);
3584         if (result->trans.np.pipe_name == NULL) {
3585                 DEBUG(1, ("Could not find pipe for interface\n"));
3586                 TALLOC_FREE(result);
3587                 return NT_STATUS_INVALID_PARAMETER;
3588         }
3589
3590         result->trans.np.cli = cli;
3591         result->abstract_syntax = *abstract_syntax;
3592         result->transfer_syntax = ndr_transfer_syntax;
3593         result->desthost = talloc_strdup(result, cli->desthost);
3594         result->srv_name_slash = talloc_asprintf_strupper_m(
3595                 result, "\\\\%s", result->desthost);
3596
3597         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3598         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3599
3600         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3601                 TALLOC_FREE(result);
3602                 return NT_STATUS_NO_MEMORY;
3603         }
3604
3605         fnum = cli_nt_create(cli, result->trans.np.pipe_name,
3606                              DESIRED_ACCESS_PIPE);
3607         if (fnum == -1) {
3608                 DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
3609                          "to machine %s.  Error was %s\n",
3610                          result->trans.np.pipe_name, cli->desthost,
3611                          cli_errstr(cli)));
3612                 TALLOC_FREE(result);
3613                 return cli_get_nt_error(cli);
3614         }
3615
3616         result->trans.np.fnum = fnum;
3617
3618         DLIST_ADD(cli->pipe_list, result);
3619         talloc_set_destructor(result, rpc_pipe_destructor);
3620
3621         *presult = result;
3622         return NT_STATUS_OK;
3623 }
3624
3625 /****************************************************************************
3626  Open a pipe to a remote server.
3627  ****************************************************************************/
3628
3629 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3630                                   const struct ndr_syntax_id *interface,
3631                                   struct rpc_pipe_client **presult)
3632 {
3633         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3634                 /*
3635                  * We should have a better way to figure out this drsuapi
3636                  * speciality...
3637                  */
3638                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3639                                          presult);
3640         }
3641
3642         return rpc_pipe_open_np(cli, interface, presult);
3643 }
3644
3645 /****************************************************************************
3646  Open a named pipe to an SMB server and bind anonymously.
3647  ****************************************************************************/
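/*
 * Typical call (this mirrors get_schannel_session_key() further down):
 *
 *	struct rpc_pipe_client *netlogon_pipe = NULL;
 *	status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
 *					  &netlogon_pipe);
 */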
3648
3649 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3650                                   const struct ndr_syntax_id *interface,
3651                                   struct rpc_pipe_client **presult)
3652 {
3653         struct rpc_pipe_client *result;
3654         struct cli_pipe_auth_data *auth;
3655         NTSTATUS status;
3656
3657         status = cli_rpc_pipe_open(cli, interface, &result);
3658         if (!NT_STATUS_IS_OK(status)) {
3659                 return status;
3660         }
3661
3662         status = rpccli_anon_bind_data(result, &auth);
3663         if (!NT_STATUS_IS_OK(status)) {
3664                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3665                           nt_errstr(status)));
3666                 TALLOC_FREE(result);
3667                 return status;
3668         }
3669
3670         /*
3671          * This is a bit of an abstraction violation due to the fact that an
3672          * anonymous bind on an authenticated SMB inherits the user/domain
3673          * from the enclosing SMB creds
3674          */
3675
3676         TALLOC_FREE(auth->user_name);
3677         TALLOC_FREE(auth->domain);
3678
3679         auth->user_name = talloc_strdup(auth, cli->user_name);
3680         auth->domain = talloc_strdup(auth, cli->domain);
3681         auth->user_session_key = data_blob_talloc(auth,
3682                 cli->user_session_key.data,
3683                 cli->user_session_key.length);
3684
3685         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3686                 TALLOC_FREE(result);
3687                 return NT_STATUS_NO_MEMORY;
3688         }
3689
3690         status = rpc_pipe_bind(result, auth);
3691         if (!NT_STATUS_IS_OK(status)) {
3692                 int lvl = 0;
3693                 if (ndr_syntax_id_equal(interface,
3694                                         &ndr_table_dssetup.syntax_id)) {
3695                         /* non AD domains just don't have this pipe, avoid
3696                          * level 0 statement in that case - gd */
3697                         lvl = 3;
3698                 }
3699                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3700                             "%s failed with error %s\n",
3701                             cli_get_pipe_name_from_iface(debug_ctx(), cli,
3702                                                          interface),
3703                             nt_errstr(status) ));
3704                 TALLOC_FREE(result);
3705                 return status;
3706         }
3707
3708         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3709                   "%s and bound anonymously.\n", result->trans.np.pipe_name,
3710                   cli->desthost ));
3711
3712         *presult = result;
3713         return NT_STATUS_OK;
3714 }
3715
3716 /****************************************************************************
3717  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3718  ****************************************************************************/
3719
3720 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3721                                                    const struct ndr_syntax_id *interface,
3722                                                    enum pipe_auth_type auth_type,
3723                                                    enum pipe_auth_level auth_level,
3724                                                    const char *domain,
3725                                                    const char *username,
3726                                                    const char *password,
3727                                                    struct rpc_pipe_client **presult)
3728 {
3729         struct rpc_pipe_client *result;
3730         struct cli_pipe_auth_data *auth;
3731         NTSTATUS status;
3732
3733         status = cli_rpc_pipe_open(cli, interface, &result);
3734         if (!NT_STATUS_IS_OK(status)) {
3735                 return status;
3736         }
3737
3738         status = rpccli_ntlmssp_bind_data(
3739                 result, auth_type, auth_level, domain, username,
3740                 cli->pwd.null_pwd ? NULL : password, &auth);
3741         if (!NT_STATUS_IS_OK(status)) {
3742                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3743                           nt_errstr(status)));
3744                 goto err;
3745         }
3746
3747         status = rpc_pipe_bind(result, auth);
3748         if (!NT_STATUS_IS_OK(status)) {
3749                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3750                         nt_errstr(status) ));
3751                 goto err;
3752         }
3753
3754         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3755                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3756                 result->trans.np.pipe_name, cli->desthost,
3757                 domain, username ));
3758
3759         *presult = result;
3760         return NT_STATUS_OK;
3761
3762   err:
3763
3764         TALLOC_FREE(result);
3765         return status;
3766 }
3767
3768 /****************************************************************************
3769  External interface.
3770  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3771  ****************************************************************************/
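/*
 * Illustrative call (editorial example; the interface and the
 * domain/user/password literals are placeholders, not values used anywhere
 * in this file):
 *
 *	status = cli_rpc_pipe_open_ntlmssp(cli, &ndr_table_samr.syntax_id,
 *					   PIPE_AUTH_LEVEL_PRIVACY,
 *					   "WORKGROUP", "user", "pass",
 *					   &samr_pipe);
 *
 * See rpccli_ntlmssp_bind_data() above for how the auth level is mapped
 * onto the NTLMSSP sign/seal negotiate flags.
 */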
3772
3773 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3774                                    const struct ndr_syntax_id *interface,
3775                                    enum pipe_auth_level auth_level,
3776                                    const char *domain,
3777                                    const char *username,
3778                                    const char *password,
3779                                    struct rpc_pipe_client **presult)
3780 {
3781         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3782                                                 interface,
3783                                                 PIPE_AUTH_TYPE_NTLMSSP,
3784                                                 auth_level,
3785                                                 domain,
3786                                                 username,
3787                                                 password,
3788                                                 presult);
3789 }
3790
3791 /****************************************************************************
3792  External interface.
3793  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3794  ****************************************************************************/
3795
3796 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3797                                           const struct ndr_syntax_id *interface,
3798                                           enum pipe_auth_level auth_level,
3799                                           const char *domain,
3800                                           const char *username,
3801                                           const char *password,
3802                                           struct rpc_pipe_client **presult)
3803 {
3804         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3805                                                 interface,
3806                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3807                                                 auth_level,
3808                                                 domain,
3809                                                 username,
3810                                                 password,
3811                                                 presult);
3812 }
3813
3814 /****************************************************************************
3815   Get the schannel session key out of an already opened netlogon pipe.
3816  ****************************************************************************/
3817 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3818                                                 struct cli_state *cli,
3819                                                 const char *domain,
3820                                                 uint32 *pneg_flags)
3821 {
3822         uint32 sec_chan_type = 0;
3823         unsigned char machine_pwd[16];
3824         const char *machine_account;
3825         NTSTATUS status;
3826
3827         /* Get the machine account credentials from secrets.tdb. */
3828         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3829                                &sec_chan_type))
3830         {
3831                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3832                         "trust account password for domain '%s'\n",
3833                         domain));
3834                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3835         }
3836
3837         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3838                                         cli->desthost, /* server name */
3839                                         domain,        /* domain */
3840                                         global_myname(), /* client name */
3841                                         machine_account, /* machine account name */
3842                                         machine_pwd,
3843                                         sec_chan_type,
3844                                         pneg_flags);
3845
3846         if (!NT_STATUS_IS_OK(status)) {
3847                 DEBUG(3, ("get_schannel_session_key_common: "
3848                           "rpccli_netlogon_setup_creds failed with result %s "
3849                           "to server %s, domain %s, machine account %s.\n",
3850                           nt_errstr(status), cli->desthost, domain,
3851                           machine_account ));
3852                 return status;
3853         }
3854
3855         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3856                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3857                         cli->desthost));
3858                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3859         }
3860
3861         return NT_STATUS_OK;
3862 }
3863
3864 /****************************************************************************
3865  Open a netlogon pipe and get the schannel session key.
3866  Now exposed to external callers.
3867  ****************************************************************************/
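/*
 * Editor's note: callers typically keep the returned netlogon pipe open just
 * long enough to pass netlogon_pipe->dc to
 * cli_rpc_pipe_open_schannel_with_key(), then free it - see
 * cli_rpc_pipe_open_schannel() below for the pattern.
 */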
3868
3869
3870 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3871                                   const char *domain,
3872                                   uint32 *pneg_flags,
3873                                   struct rpc_pipe_client **presult)
3874 {
3875         struct rpc_pipe_client *netlogon_pipe = NULL;
3876         NTSTATUS status;
3877
3878         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3879                                           &netlogon_pipe);
3880         if (!NT_STATUS_IS_OK(status)) {
3881                 return status;
3882         }
3883
3884         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3885                                                  pneg_flags);
3886         if (!NT_STATUS_IS_OK(status)) {
3887                 TALLOC_FREE(netlogon_pipe);
3888                 return status;
3889         }
3890
3891         *presult = netlogon_pipe;
3892         return NT_STATUS_OK;
3893 }
3894
3895 /****************************************************************************
3896  External interface.
3897  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3898  using the given session key. Sign and seal.
3899  ****************************************************************************/
3900
3901 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3902                                              const struct ndr_syntax_id *interface,
3903                                              enum pipe_auth_level auth_level,
3904                                              const char *domain,
3905                                              const struct dcinfo *pdc,
3906                                              struct rpc_pipe_client **presult)
3907 {
3908         struct rpc_pipe_client *result;
3909         struct cli_pipe_auth_data *auth;
3910         NTSTATUS status;
3911
3912         status = cli_rpc_pipe_open(cli, interface, &result);
3913         if (!NT_STATUS_IS_OK(status)) {
3914                 return status;
3915         }
3916
3917         status = rpccli_schannel_bind_data(result, domain, auth_level,
3918                                            pdc->sess_key, &auth);
3919         if (!NT_STATUS_IS_OK(status)) {
3920                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3921                           nt_errstr(status)));
3922                 TALLOC_FREE(result);
3923                 return status;
3924         }
3925
3926         status = rpc_pipe_bind(result, auth);
3927         if (!NT_STATUS_IS_OK(status)) {
3928                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3929                           "cli_rpc_pipe_bind failed with error %s\n",
3930                           nt_errstr(status) ));
3931                 TALLOC_FREE(result);
3932                 return status;
3933         }
3934
3935         /*
3936          * The credentials on a new netlogon pipe are the ones we are passed
3937          * in - copy them over.
3938          */
3939         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3940         if (result->dc == NULL) {
3941                 DEBUG(0, ("talloc failed\n"));
3942                 TALLOC_FREE(result);
3943                 return NT_STATUS_NO_MEMORY;
3944         }
3945
3946         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3947                 "for domain %s "
3948                 "and bound using schannel.\n",
3949                 result->trans.np.pipe_name, cli->desthost, domain ));
3950
3951         *presult = result;
3952         return NT_STATUS_OK;
3953 }
3954
3955 /****************************************************************************
3956  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3957  Fetch the session key ourselves using a temporary netlogon pipe. This
3958  version uses an ntlmssp auth bound netlogon pipe to get the key.
3959  ****************************************************************************/
3960
3961 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3962                                                       const char *domain,
3963                                                       const char *username,
3964                                                       const char *password,
3965                                                       uint32 *pneg_flags,
3966                                                       struct rpc_pipe_client **presult)
3967 {
3968         struct rpc_pipe_client *netlogon_pipe = NULL;
3969         NTSTATUS status;
3970
3971         status = cli_rpc_pipe_open_spnego_ntlmssp(
3972                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3973                 domain, username, password, &netlogon_pipe);
3974         if (!NT_STATUS_IS_OK(status)) {
3975                 return status;
3976         }
3977
3978         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3979                                                  pneg_flags);
3980         if (!NT_STATUS_IS_OK(status)) {
3981                 TALLOC_FREE(netlogon_pipe);
3982                 return status;
3983         }
3984
3985         *presult = netlogon_pipe;
3986         return NT_STATUS_OK;
3987 }
3988
3989 /****************************************************************************
3990  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3991  Fetch the session key ourselves using a temporary netlogon pipe. This version
3992  uses an ntlmssp bind to get the session key.
3993  ****************************************************************************/
3994
3995 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3996                                                  const struct ndr_syntax_id *interface,
3997                                                  enum pipe_auth_level auth_level,
3998                                                  const char *domain,
3999                                                  const char *username,
4000                                                  const char *password,
4001                                                  struct rpc_pipe_client **presult)
4002 {
4003         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4004         struct rpc_pipe_client *netlogon_pipe = NULL;
4005         struct rpc_pipe_client *result = NULL;
4006         NTSTATUS status;
4007
4008         status = get_schannel_session_key_auth_ntlmssp(
4009                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4010         if (!NT_STATUS_IS_OK(status)) {
4011                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4012                         "key from server %s for domain %s.\n",
4013                         cli->desthost, domain ));
4014                 return status;
4015         }
4016
4017         status = cli_rpc_pipe_open_schannel_with_key(
4018                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4019                 &result);
4020
4021         /* Now that we've bound using the session key we can close the netlogon pipe. */
4022         TALLOC_FREE(netlogon_pipe);
4023
4024         if (NT_STATUS_IS_OK(status)) {
4025                 *presult = result;
4026         }
4027         return status;
4028 }
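
/****************************************************************************
 Illustrative usage sketch for cli_rpc_pipe_open_ntlmssp_auth_schannel().
 The wrapper below and its name are hypothetical and only demonstrate the
 call pattern; the cli_rpc_pipe_open_ntlmssp_auth_schannel() call, the
 ndr_table_samr interface and PIPE_AUTH_LEVEL_PRIVACY come from this file.
 ****************************************************************************/

static NTSTATUS example_open_samr_schannel(struct cli_state *cli,
                                           const char *domain,
                                           const char *username,
                                           const char *password,
                                           struct rpc_pipe_client **psamr)
{
        struct rpc_pipe_client *samr_pipe = NULL;
        NTSTATUS status;

        /* Fetch the schannel key via an NTLMSSP-authenticated netlogon
           bind, then bind the SAMR interface at privacy level. */
        status = cli_rpc_pipe_open_ntlmssp_auth_schannel(
                cli, &ndr_table_samr.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
                domain, username, password, &samr_pipe);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        *psamr = samr_pipe;
        return NT_STATUS_OK;
}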
4029
4030 /****************************************************************************
4031  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4032  Fetch the session key ourselves using a temporary netlogon pipe.
4033  ****************************************************************************/
4034
4035 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4036                                     const struct ndr_syntax_id *interface,
4037                                     enum pipe_auth_level auth_level,
4038                                     const char *domain,
4039                                     struct rpc_pipe_client **presult)
4040 {
4041         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4042         struct rpc_pipe_client *netlogon_pipe = NULL;
4043         struct rpc_pipe_client *result = NULL;
4044         NTSTATUS status;
4045
4046         status = get_schannel_session_key(cli, domain, &neg_flags,
4047                                           &netlogon_pipe);
4048         if (!NT_STATUS_IS_OK(status)) {
4049                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4050                         "key from server %s for domain %s.\n",
4051                         cli->desthost, domain ));
4052                 return status;
4053         }
4054
4055         status = cli_rpc_pipe_open_schannel_with_key(
4056                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4057                 &result);
4058
4059         /* Now that we've bound using the session key we can close the netlogon pipe. */
4060         TALLOC_FREE(netlogon_pipe);
4061
4062         if (NT_STATUS_IS_OK(status)) {
4063                 *presult = result;
4064         }
4065
4066         return status;
4067 }
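
/****************************************************************************
 Illustrative usage sketch for cli_rpc_pipe_open_schannel(). The wrapper and
 its name are hypothetical; it shows a caller binding the netlogon interface
 over schannel at privacy level, with the session key fetched internally by
 the routine above (typically from the stored machine trust account).
 ****************************************************************************/

static NTSTATUS example_open_netlogon_schannel(struct cli_state *cli,
                                               const char *domain,
                                               struct rpc_pipe_client **presult)
{
        struct rpc_pipe_client *netlogon_pipe = NULL;
        NTSTATUS status;

        status = cli_rpc_pipe_open_schannel(cli,
                                            &ndr_table_netlogon.syntax_id,
                                            PIPE_AUTH_LEVEL_PRIVACY,
                                            domain, &netlogon_pipe);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(3, ("example_open_netlogon_schannel: schannel bind "
                          "failed: %s\n", nt_errstr(status)));
                return status;
        }

        *presult = netlogon_pipe;
        return NT_STATUS_OK;
}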
4068
4069 /****************************************************************************
4070  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4071  The idea is that this can be called with service_princ, username and password
4072  all NULL, so long as the caller already has a valid TGT.
4073  ****************************************************************************/
4074
4075 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4076                                 const struct ndr_syntax_id *interface,
4077                                 enum pipe_auth_level auth_level,
4078                                 const char *service_princ,
4079                                 const char *username,
4080                                 const char *password,
4081                                 struct rpc_pipe_client **presult)
4082 {
4083 #ifdef HAVE_KRB5
4084         struct rpc_pipe_client *result;
4085         struct cli_pipe_auth_data *auth;
4086         NTSTATUS status;
4087
4088         status = cli_rpc_pipe_open(cli, interface, &result);
4089         if (!NT_STATUS_IS_OK(status)) {
4090                 return status;
4091         }
4092
4093         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4094                                            username, password, &auth);
4095         if (!NT_STATUS_IS_OK(status)) {
4096                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4097                           nt_errstr(status)));
4098                 TALLOC_FREE(result);
4099                 return status;
4100         }
4101
4102         status = rpc_pipe_bind(result, auth);
4103         if (!NT_STATUS_IS_OK(status)) {
4104                 DEBUG(0, ("cli_rpc_pipe_open_krb5: rpc_pipe_bind failed "
4105                           "with error %s\n", nt_errstr(status)));
4106                 TALLOC_FREE(result);
4107                 return status;
4108         }
4109
4110         *presult = result;
4111         return NT_STATUS_OK;
4112 #else
4113         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4114         return NT_STATUS_NOT_IMPLEMENTED;
4115 #endif
4116 }
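
/****************************************************************************
 Illustrative usage sketch for cli_rpc_pipe_open_krb5(). The wrapper and its
 name are hypothetical; it shows the NULL-credentials form mentioned above,
 which relies on the caller already holding a valid TGT.
 ****************************************************************************/

static NTSTATUS example_open_lsa_krb5(struct cli_state *cli,
                                      struct rpc_pipe_client **presult)
{
        struct rpc_pipe_client *lsa_pipe = NULL;
        NTSTATUS status;

        /* NULL service principal, username and password: use the TGT from
           the caller's credentials cache. */
        status = cli_rpc_pipe_open_krb5(cli,
                                        &ndr_table_lsarpc.syntax_id,
                                        PIPE_AUTH_LEVEL_PRIVACY,
                                        NULL, NULL, NULL,
                                        &lsa_pipe);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        *presult = lsa_pipe;
        return NT_STATUS_OK;
}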
4117
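/****************************************************************************
 Return a copy of the session key negotiated on an open pipe, talloc'ed off
 mem_ctx. Which key is handed back depends on the auth type used for the
 bind: the 16 byte schannel key, the NTLMSSP or Kerberos session key, or the
 user session key for unauthenticated pipes.
 ****************************************************************************/
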
4118 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4119                              struct rpc_pipe_client *cli,
4120                              DATA_BLOB *session_key)
4121 {
4122         if (!session_key || !cli) {
4123                 return NT_STATUS_INVALID_PARAMETER;
4124         }
4125
4126         if (!cli->auth) {
4127                 return NT_STATUS_INVALID_PARAMETER;
4128         }
4129
4130         switch (cli->auth->auth_type) {
4131                 case PIPE_AUTH_TYPE_SCHANNEL:
4132                         *session_key = data_blob_talloc(mem_ctx,
4133                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4134                         break;
4135                 case PIPE_AUTH_TYPE_NTLMSSP:
4136                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4137                         *session_key = data_blob_talloc(mem_ctx,
4138                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4139                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4140                         break;
4141                 case PIPE_AUTH_TYPE_KRB5:
4142                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4143                         *session_key = data_blob_talloc(mem_ctx,
4144                                 cli->auth->a_u.kerberos_auth->session_key.data,
4145                                 cli->auth->a_u.kerberos_auth->session_key.length);
4146                         break;
4147                 case PIPE_AUTH_TYPE_NONE:
4148                         *session_key = data_blob_talloc(mem_ctx,
4149                                 cli->auth->user_session_key.data,
4150                                 cli->auth->user_session_key.length);
4151                         break;
4152                 default:
4153                         return NT_STATUS_NO_USER_SESSION_KEY;
4154         }
4155
4156         return NT_STATUS_OK;
4157 }
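
/****************************************************************************
 Illustrative usage sketch for cli_get_session_key(). The wrapper and its
 name are hypothetical; it only shows fetching the key off a caller-supplied
 talloc context.
 ****************************************************************************/

static NTSTATUS example_log_session_key_length(TALLOC_CTX *mem_ctx,
                                               struct rpc_pipe_client *pipe_cli)
{
        DATA_BLOB session_key;
        NTSTATUS status;

        status = cli_get_session_key(mem_ctx, pipe_cli, &session_key);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        DEBUG(10, ("session key is %u bytes long\n",
                   (unsigned int)session_key.length));

        /* The blob is talloc'ed off mem_ctx, so it is freed with mem_ctx. */
        return NT_STATUS_OK;
}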