Add code to torture the fragmentation code a bit
[ira/wip.git] source3/rpc_client/cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
54 static const struct pipe_id_info {
55         /* the names appear not to matter: the syntaxes _do_ matter */
56
57         const char *client_pipe;
58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
60 {
61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
68         { PIPE_SPOOLSS,         &syntax_spoolss },
69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
77         { NULL, NULL }
78 };
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85                                          struct cli_state *cli,
86                                          const struct ndr_syntax_id *interface)
87 {
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         return NULL;
102 }
103
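A minimal usage sketch (an assumption, not taken from this file): the mem_ctx and cli arguments are unused by the lookup itself, and the returned pointer keeps the backslash that follows the "\PIPE" prefix.

	/* Hypothetical caller; mem_ctx and cli are assumed to be in scope. */
	const char *name = cli_get_pipe_name_from_iface(
		mem_ctx, cli, &ndr_table_srvsvc.syntax_id);
	/* name now points into the static pipe_names table ("\srvsvc" here),
	   or is NULL for an interface not listed above. */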
104 /********************************************************************
105  Map internal value to wire value.
106  ********************************************************************/
107
108 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
109 {
110         switch (auth_type) {
111
112         case PIPE_AUTH_TYPE_NONE:
113                 return RPC_ANONYMOUS_AUTH_TYPE;
114
115         case PIPE_AUTH_TYPE_NTLMSSP:
116                 return RPC_NTLMSSP_AUTH_TYPE;
117
118         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
119         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
120                 return RPC_SPNEGO_AUTH_TYPE;
121
122         case PIPE_AUTH_TYPE_SCHANNEL:
123                 return RPC_SCHANNEL_AUTH_TYPE;
124
125         case PIPE_AUTH_TYPE_KRB5:
126                 return RPC_KRB5_AUTH_TYPE;
127
128         default:
129                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
130                         "auth type %u\n",
131                         (unsigned int)auth_type ));
132                 break;
133         }
134         return -1;
135 }
136
137 /********************************************************************
138  Pipe description for DEBUG output.
139  ********************************************************************/
140 static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
141 {
142         char *result;
143
144         switch (cli->transport_type) {
145         case NCACN_NP:
146                 result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
147                                          "fnum 0x%x",
148                                          cli->desthost,
149                                          cli->trans.np.pipe_name,
150                                          (unsigned int)(cli->trans.np.fnum));
151                 break;
152         case NCACN_IP_TCP:
153         case NCACN_UNIX_STREAM:
154                 result = talloc_asprintf(mem_ctx, "host %s, fd %d",
155                                          cli->desthost, cli->trans.sock.fd);
156                 break;
157         default:
158                 result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
159                 break;
160         }
161         SMB_ASSERT(result != NULL);
162         return result;
163 }
164
165 /********************************************************************
166  Rpc pipe call id.
167  ********************************************************************/
168
169 static uint32 get_rpc_call_id(void)
170 {
171         static uint32 call_id = 0;
172         return ++call_id;
173 }
174
175 /*
176  * Realloc pdu to have at least "size" bytes
177  */
178
179 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
180 {
181         size_t extra_size;
182
183         if (prs_data_size(pdu) >= size) {
184                 return true;
185         }
186
187         extra_size = size - prs_data_size(pdu);
188
189         if (!prs_force_grow(pdu, extra_size)) {
190                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
191                           "%d bytes.\n", (int)extra_size));
192                 return false;
193         }
194
195         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
196                   (int)extra_size, prs_data_size(pdu)));
197         return true;
198 }
199
200
201 /*******************************************************************
202  Use SMBreadX to get the rest of one fragment's worth of rpc data.
203  Reads the whole size or gives an error message.
204  ********************************************************************/
205
206 struct rpc_read_state {
207         struct event_context *ev;
208         struct rpc_pipe_client *cli;
209         char *data;
210         size_t size;
211         size_t num_read;
212 };
213
214 static void rpc_read_np_done(struct async_req *subreq);
215 static void rpc_read_sock_done(struct async_req *subreq);
216
217 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
218                                        struct event_context *ev,
219                                        struct rpc_pipe_client *cli,
220                                        char *data, size_t size)
221 {
222         struct async_req *result, *subreq;
223         struct rpc_read_state *state;
224
225         result = async_req_new(mem_ctx);
226         if (result == NULL) {
227                 return NULL;
228         }
229         state = talloc(result, struct rpc_read_state);
230         if (state == NULL) {
231                 goto fail;
232         }
233         result->private_data = state;
234
235         state->ev = ev;
236         state->cli = cli;
237         state->data = data;
238         state->size = size;
239         state->num_read = 0;
240
241         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
242
243         if (cli->transport_type == NCACN_NP) {
244                 subreq = cli_read_andx_send(
245                         state, ev, cli->trans.np.cli,
246                         cli->trans.np.fnum, 0, size);
247                 if (subreq == NULL) {
248                         DEBUG(10, ("cli_read_andx_send failed\n"));
249                         goto fail;
250                 }
251                 subreq->async.fn = rpc_read_np_done;
252                 subreq->async.priv = result;
253                 return result;
254         }
255
256         if ((cli->transport_type == NCACN_IP_TCP)
257             || (cli->transport_type == NCACN_UNIX_STREAM)) {
258                 subreq = recvall_send(state, ev, cli->trans.sock.fd,
259                                       data, size, 0);
260                 if (subreq == NULL) {
261                         DEBUG(10, ("recvall_send failed\n"));
262                         goto fail;
263                 }
264                 subreq->async.fn = rpc_read_sock_done;
265                 subreq->async.priv = result;
266                 return result;
267         }
268
269         if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
270                 return result;
271         }
272  fail:
273         TALLOC_FREE(result);
274         return NULL;
275 }
276
277 static void rpc_read_np_done(struct async_req *subreq)
278 {
279         struct async_req *req = talloc_get_type_abort(
280                 subreq->async.priv, struct async_req);
281         struct rpc_read_state *state = talloc_get_type_abort(
282                 req->private_data, struct rpc_read_state);
283         NTSTATUS status;
284         ssize_t received;
285         uint8_t *rcvbuf;
286
287         status = cli_read_andx_recv(subreq, &received, &rcvbuf);
288         /*
289          * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
290          * child of that.
291          */
292         if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
293                 status = NT_STATUS_OK;
294         }
295         if (!NT_STATUS_IS_OK(status)) {
296                 TALLOC_FREE(subreq);
297                 async_req_error(req, status);
298                 return;
299         }
300
301         memcpy(state->data + state->num_read, rcvbuf, received);
302         TALLOC_FREE(subreq);
303
304         state->num_read += received;
305
306         if (state->num_read == state->size) {
307                 async_req_done(req);
308                 return;
309         }
310
311         subreq = cli_read_andx_send(
312                 state, state->ev, state->cli->trans.np.cli,
313                 state->cli->trans.np.fnum, 0,
314                 state->size - state->num_read);
315
316         if (async_req_nomem(subreq, req)) {
317                 return;
318         }
319
320         subreq->async.fn = rpc_read_np_done;
321         subreq->async.priv = req;
322 }
323
324 static void rpc_read_sock_done(struct async_req *subreq)
325 {
326         struct async_req *req = talloc_get_type_abort(
327                 subreq->async.priv, struct async_req);
328         NTSTATUS status;
329
330         status = recvall_recv(subreq);
331         TALLOC_FREE(subreq);
332         if (!NT_STATUS_IS_OK(status)) {
333                 async_req_error(req, status);
334                 return;
335         }
336
337         async_req_done(req);
338 }
339
340 static NTSTATUS rpc_read_recv(struct async_req *req)
341 {
342         return async_req_simple_recv(req);
343 }
344
345 struct rpc_write_state {
346         struct event_context *ev;
347         struct rpc_pipe_client *cli;
348         const char *data;
349         size_t size;
350         size_t num_written;
351 };
352
353 static void rpc_write_np_done(struct async_req *subreq);
354 static void rpc_write_sock_done(struct async_req *subreq);
355
356 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
357                                         struct event_context *ev,
358                                         struct rpc_pipe_client *cli,
359                                         const char *data, size_t size)
360 {
361         struct async_req *result, *subreq;
362         struct rpc_write_state *state;
363
364         result = async_req_new(mem_ctx);
365         if (result == NULL) {
366                 return NULL;
367         }
368         state = talloc(result, struct rpc_write_state);
369         if (state == NULL) {
370                 goto fail;
371         }
372         result->private_data = state;
373
374         state->ev = ev;
375         state->cli = cli;
376         state->data = data;
377         state->size = size;
378         state->num_written = 0;
379
380         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
381
382         if (cli->transport_type == NCACN_NP) {
383                 subreq = cli_write_andx_send(
384                         state, ev, cli->trans.np.cli,
385                         cli->trans.np.fnum, 8, /* 8 means message mode. */
386                         (uint8_t *)data, 0, size);
387                 if (subreq == NULL) {
388                         DEBUG(10, ("cli_write_andx_send failed\n"));
389                         goto fail;
390                 }
391                 subreq->async.fn = rpc_write_np_done;
392                 subreq->async.priv = result;
393                 return result;
394         }
395
396         if ((cli->transport_type == NCACN_IP_TCP)
397             || (cli->transport_type == NCACN_UNIX_STREAM)) {
398                 subreq = sendall_send(state, ev, cli->trans.sock.fd,
399                                       data, size, 0);
400                 if (subreq == NULL) {
401                         DEBUG(10, ("sendall_send failed\n"));
402                         goto fail;
403                 }
404                 subreq->async.fn = rpc_write_sock_done;
405                 subreq->async.priv = result;
406                 return result;
407         }
408
409         if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
410                 return result;
411         }
412  fail:
413         TALLOC_FREE(result);
414         return NULL;
415 }
416
417 static void rpc_write_np_done(struct async_req *subreq)
418 {
419         struct async_req *req = talloc_get_type_abort(
420                 subreq->async.priv, struct async_req);
421         struct rpc_write_state *state = talloc_get_type_abort(
422                 req->private_data, struct rpc_write_state);
423         NTSTATUS status;
424         size_t written;
425
426         status = cli_write_andx_recv(subreq, &written);
427         TALLOC_FREE(subreq);
428         if (!NT_STATUS_IS_OK(status)) {
429                 async_req_error(req, status);
430                 return;
431         }
432
433         state->num_written += written;
434
435         if (state->num_written == state->size) {
436                 async_req_done(req);
437                 return;
438         }
439
440         subreq = cli_write_andx_send(
441                 state, state->ev, state->cli->trans.np.cli,
442                 state->cli->trans.np.fnum, 8,
443                 (uint8_t *)(state->data + state->num_written),
444                 0, state->size - state->num_written);
445
446         if (async_req_nomem(subreq, req)) {
447                 return;
448         }
449
450         subreq->async.fn = rpc_write_np_done;
451         subreq->async.priv = req;
452 }
453
454 static void rpc_write_sock_done(struct async_req *subreq)
455 {
456         struct async_req *req = talloc_get_type_abort(
457                 subreq->async.priv, struct async_req);
458         NTSTATUS status;
459
460         status = sendall_recv(subreq);
461         TALLOC_FREE(subreq);
462         if (!NT_STATUS_IS_OK(status)) {
463                 async_req_error(req, status);
464                 return;
465         }
466
467         async_req_done(req);
468 }
469
470 static NTSTATUS rpc_write_recv(struct async_req *req)
471 {
472         return async_req_simple_recv(req);
473 }
474
475
476 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
477                                  struct rpc_hdr_info *prhdr,
478                                  prs_struct *pdu)
479 {
480         /*
481          * This next call sets the endian bit correctly in current_pdu. We
482          * will propagate this to rbuf later.
483          */
484
485         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
486                 DEBUG(0, ("parse_rpc_header: Failed to unmarshall RPC_HDR.\n"));
487                 return NT_STATUS_BUFFER_TOO_SMALL;
488         }
489
490         if (prhdr->frag_len > cli->max_recv_frag) {
491                 DEBUG(0, ("parse_rpc_header: Server sent fraglen %d,"
492                           " we only allow %d\n", (int)prhdr->frag_len,
493                           (int)cli->max_recv_frag));
494                 return NT_STATUS_BUFFER_TOO_SMALL;
495         }
496
497         return NT_STATUS_OK;
498 }
499
500 /****************************************************************************
501  Try to get a PDU's worth of data from current_pdu. If there is not enough, read
502  more from the wire.
503  ****************************************************************************/
504
505 struct get_complete_frag_state {
506         struct event_context *ev;
507         struct rpc_pipe_client *cli;
508         struct rpc_hdr_info *prhdr;
509         prs_struct *pdu;
510 };
511
512 static void get_complete_frag_got_header(struct async_req *subreq);
513 static void get_complete_frag_got_rest(struct async_req *subreq);
514
515 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
516                                                struct event_context *ev,
517                                                struct rpc_pipe_client *cli,
518                                                struct rpc_hdr_info *prhdr,
519                                                prs_struct *pdu)
520 {
521         struct async_req *result, *subreq;
522         struct get_complete_frag_state *state;
523         uint32_t pdu_len;
524         NTSTATUS status;
525
526         result = async_req_new(mem_ctx);
527         if (result == NULL) {
528                 return NULL;
529         }
530         state = talloc(result, struct get_complete_frag_state);
531         if (state == NULL) {
532                 goto fail;
533         }
534         result->private_data = state;
535
536         state->ev = ev;
537         state->cli = cli;
538         state->prhdr = prhdr;
539         state->pdu = pdu;
540
541         pdu_len = prs_data_size(pdu);
542         if (pdu_len < RPC_HEADER_LEN) {
543                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
544                         status = NT_STATUS_NO_MEMORY;
545                         goto post_status;
546                 }
547                 subreq = rpc_read_send(state, state->ev, state->cli,
548                                        prs_data_p(state->pdu) + pdu_len,
549                                        RPC_HEADER_LEN - pdu_len);
550                 if (subreq == NULL) {
551                         status = NT_STATUS_NO_MEMORY;
552                         goto post_status;
553                 }
554                 subreq->async.fn = get_complete_frag_got_header;
555                 subreq->async.priv = result;
556                 return result;
557         }
558
559         status = parse_rpc_header(cli, prhdr, pdu);
560         if (!NT_STATUS_IS_OK(status)) {
561                 goto post_status;
562         }
563
564         /*
565          * Ensure we have frag_len bytes of data.
566          */
567         if (pdu_len < prhdr->frag_len) {
568                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
569                         status = NT_STATUS_NO_MEMORY;
570                         goto post_status;
571                 }
572                 subreq = rpc_read_send(state, state->ev, state->cli,
573                                        prs_data_p(pdu) + pdu_len,
574                                        prhdr->frag_len - pdu_len);
575                 if (subreq == NULL) {
576                         status = NT_STATUS_NO_MEMORY;
577                         goto post_status;
578                 }
579                 subreq->async.fn = get_complete_frag_got_rest;
580                 subreq->async.priv = result;
581                 return result;
582         }
583
584         status = NT_STATUS_OK;
585  post_status:
586         if (async_post_status(result, ev, status)) {
587                 return result;
588         }
589  fail:
590         TALLOC_FREE(result);
591         return NULL;
592 }
593
594 static void get_complete_frag_got_header(struct async_req *subreq)
595 {
596         struct async_req *req = talloc_get_type_abort(
597                 subreq->async.priv, struct async_req);
598         struct get_complete_frag_state *state = talloc_get_type_abort(
599                 req->private_data, struct get_complete_frag_state);
600         NTSTATUS status;
601
602         status = rpc_read_recv(subreq);
603         TALLOC_FREE(subreq);
604         if (!NT_STATUS_IS_OK(status)) {
605                 async_req_error(req, status);
606                 return;
607         }
608
609         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
610         if (!NT_STATUS_IS_OK(status)) {
611                 async_req_error(req, status);
612                 return;
613         }
614
615         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
616                 async_req_error(req, NT_STATUS_NO_MEMORY);
617                 return;
618         }
619
620         /*
621          * We're here in this piece of code because we've read exactly
622          * RPC_HEADER_LEN bytes into state->pdu.
623          */
624
625         subreq = rpc_read_send(state, state->ev, state->cli,
626                                prs_data_p(state->pdu) + RPC_HEADER_LEN,
627                                state->prhdr->frag_len - RPC_HEADER_LEN);
628         if (async_req_nomem(subreq, req)) {
629                 return;
630         }
631         subreq->async.fn = get_complete_frag_got_rest;
632         subreq->async.priv = req;
633 }
634
635 static void get_complete_frag_got_rest(struct async_req *subreq)
636 {
637         struct async_req *req = talloc_get_type_abort(
638                 subreq->async.priv, struct async_req);
639         NTSTATUS status;
640
641         status = rpc_read_recv(subreq);
642         TALLOC_FREE(subreq);
643         if (!NT_STATUS_IS_OK(status)) {
644                 async_req_error(req, status);
645                 return;
646         }
647         async_req_done(req);
648 }
649
650 static NTSTATUS get_complete_frag_recv(struct async_req *req)
651 {
652         return async_req_simple_recv(req);
653 }
654
655 /****************************************************************************
656  NTLMSSP specific sign/seal.
657  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
658  In fact I should probably abstract these into identical pieces of code... JRA.
659  ****************************************************************************/
660
661 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
662                                 prs_struct *current_pdu,
663                                 uint8 *p_ss_padding_len)
664 {
665         RPC_HDR_AUTH auth_info;
666         uint32 save_offset = prs_offset(current_pdu);
667         uint32 auth_len = prhdr->auth_len;
668         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
669         unsigned char *data = NULL;
670         size_t data_len;
671         unsigned char *full_packet_data = NULL;
672         size_t full_packet_data_len;
673         DATA_BLOB auth_blob;
674         NTSTATUS status;
675
676         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
677             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
678                 return NT_STATUS_OK;
679         }
680
681         if (!ntlmssp_state) {
682                 return NT_STATUS_INVALID_PARAMETER;
683         }
684
685         /* Ensure there's enough data for an authenticated response. */
686         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
687                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
688                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
689                         (unsigned int)auth_len ));
690                 return NT_STATUS_BUFFER_TOO_SMALL;
691         }
692
693         /*
694          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
695          * after the RPC header.
696          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
697          * functions as NTLMv2 checks the rpc headers also.
698          */
699
700         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
701         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
702
703         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
704         full_packet_data_len = prhdr->frag_len - auth_len;
705
706         /* Pull the auth header and the following data into a blob. */
707         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
708                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
709                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
710                 return NT_STATUS_BUFFER_TOO_SMALL;
711         }
712
713         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
714                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
715                 return NT_STATUS_BUFFER_TOO_SMALL;
716         }
717
718         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
719         auth_blob.length = auth_len;
720
721         switch (cli->auth->auth_level) {
722                 case PIPE_AUTH_LEVEL_PRIVACY:
723                         /* Data is encrypted. */
724                         status = ntlmssp_unseal_packet(ntlmssp_state,
725                                                         data, data_len,
726                                                         full_packet_data,
727                                                         full_packet_data_len,
728                                                         &auth_blob);
729                         if (!NT_STATUS_IS_OK(status)) {
730                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
731                                         "packet from %s. Error was %s.\n",
732                                         rpccli_pipe_txt(debug_ctx(), cli),
733                                         nt_errstr(status) ));
734                                 return status;
735                         }
736                         break;
737                 case PIPE_AUTH_LEVEL_INTEGRITY:
738                         /* Data is signed. */
739                         status = ntlmssp_check_packet(ntlmssp_state,
740                                                         data, data_len,
741                                                         full_packet_data,
742                                                         full_packet_data_len,
743                                                         &auth_blob);
744                         if (!NT_STATUS_IS_OK(status)) {
745                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
746                                         "packet from %s. Error was %s.\n",
747                                         rpccli_pipe_txt(debug_ctx(), cli),
748                                         nt_errstr(status) ));
749                                 return status;
750                         }
751                         break;
752                 default:
753                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
754                                   "auth level %d\n", cli->auth->auth_level));
755                         return NT_STATUS_INVALID_INFO_CLASS;
756         }
757
758         /*
759          * Return the parse offset to its saved position.
760          */
761
762         if(!prs_set_offset(current_pdu, save_offset)) {
763                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
764                         (unsigned int)save_offset ));
765                 return NT_STATUS_BUFFER_TOO_SMALL;
766         }
767
768         /*
769          * Remember the padding length. We must remove it from the real data
770          * stream once the sign/seal is done.
771          */
772
773         *p_ss_padding_len = auth_info.auth_pad_len;
774
775         return NT_STATUS_OK;
776 }
777
778 /****************************************************************************
779  schannel specific sign/seal.
780  ****************************************************************************/
781
782 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
783                                 prs_struct *current_pdu,
784                                 uint8 *p_ss_padding_len)
785 {
786         RPC_HDR_AUTH auth_info;
787         RPC_AUTH_SCHANNEL_CHK schannel_chk;
788         uint32 auth_len = prhdr->auth_len;
789         uint32 save_offset = prs_offset(current_pdu);
790         struct schannel_auth_struct *schannel_auth =
791                 cli->auth->a_u.schannel_auth;
792         uint32 data_len;
793
794         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
795             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
796                 return NT_STATUS_OK;
797         }
798
799         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
800                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
801                 return NT_STATUS_INVALID_PARAMETER;
802         }
803
804         if (!schannel_auth) {
805                 return NT_STATUS_INVALID_PARAMETER;
806         }
807
808         /* Ensure there's enough data for an authenticated response. */
809         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
810                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
811                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
812                         (unsigned int)auth_len ));
813                 return NT_STATUS_INVALID_PARAMETER;
814         }
815
816         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
817
818         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
819                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
820                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
821                 return NT_STATUS_BUFFER_TOO_SMALL;
822         }
823
824         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
825                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
826                 return NT_STATUS_BUFFER_TOO_SMALL;
827         }
828
829         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
830                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
831                         auth_info.auth_type));
832                 return NT_STATUS_BUFFER_TOO_SMALL;
833         }
834
835         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
836                                 &schannel_chk, current_pdu, 0)) {
837                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
838                 return NT_STATUS_BUFFER_TOO_SMALL;
839         }
840
841         if (!schannel_decode(schannel_auth,
842                         cli->auth->auth_level,
843                         SENDER_IS_ACCEPTOR,
844                         &schannel_chk,
845                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
846                         data_len)) {
847                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
848                                 "Connection to %s.\n",
849                                 rpccli_pipe_txt(debug_ctx(), cli)));
850                 return NT_STATUS_INVALID_PARAMETER;
851         }
852
853         /* The sequence number gets incremented on both send and receive. */
854         schannel_auth->seq_num++;
855
856         /*
857          * Return the parse offset to its saved position.
858          */
859
860         if(!prs_set_offset(current_pdu, save_offset)) {
861                 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
862                         (unsigned int)save_offset ));
863                 return NT_STATUS_BUFFER_TOO_SMALL;
864         }
865
866         /*
867          * Remember the padding length. We must remove it from the real data
868          * stream once the sign/seal is done.
869          */
870
871         *p_ss_padding_len = auth_info.auth_pad_len;
872
873         return NT_STATUS_OK;
874 }
875
876 /****************************************************************************
877  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
878  ****************************************************************************/
879
880 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
881                                 prs_struct *current_pdu,
882                                 uint8 *p_ss_padding_len)
883 {
884         NTSTATUS ret = NT_STATUS_OK;
885
886         /* Paranoia checks for auth_len. */
887         if (prhdr->auth_len) {
888                 if (prhdr->auth_len > prhdr->frag_len) {
889                         return NT_STATUS_INVALID_PARAMETER;
890                 }
891
892                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
893                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
894                         /* Integer wrap attempt. */
895                         return NT_STATUS_INVALID_PARAMETER;
896                 }
897         }
898
899         /*
900          * Now we have a complete RPC reply PDU fragment, try to verify any auth data.
901          */
902
903         switch(cli->auth->auth_type) {
904                 case PIPE_AUTH_TYPE_NONE:
905                         if (prhdr->auth_len) {
906                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
907                                           "Connection to %s - got non-zero "
908                                           "auth len %u.\n",
909                                         rpccli_pipe_txt(debug_ctx(), cli),
910                                         (unsigned int)prhdr->auth_len ));
911                                 return NT_STATUS_INVALID_PARAMETER;
912                         }
913                         break;
914
915                 case PIPE_AUTH_TYPE_NTLMSSP:
916                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
917                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
918                         if (!NT_STATUS_IS_OK(ret)) {
919                                 return ret;
920                         }
921                         break;
922
923                 case PIPE_AUTH_TYPE_SCHANNEL:
924                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
925                         if (!NT_STATUS_IS_OK(ret)) {
926                                 return ret;
927                         }
928                         break;
929
930                 case PIPE_AUTH_TYPE_KRB5:
931                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
932                 default:
933                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
934                                   "to %s - unknown internal auth type %u.\n",
935                                   rpccli_pipe_txt(debug_ctx(), cli),
936                                   cli->auth->auth_type ));
937                         return NT_STATUS_INVALID_INFO_CLASS;
938         }
939
940         return NT_STATUS_OK;
941 }
942
943 /****************************************************************************
944  Do basic sanity and authentication checks on an incoming pdu.
945  ****************************************************************************/
946
947 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
948                         prs_struct *current_pdu,
949                         uint8 expected_pkt_type,
950                         char **ppdata,
951                         uint32 *pdata_len,
952                         prs_struct *return_data)
953 {
954
955         NTSTATUS ret = NT_STATUS_OK;
956         uint32 current_pdu_len = prs_data_size(current_pdu);
957
958         if (current_pdu_len != prhdr->frag_len) {
959                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
960                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
961                 return NT_STATUS_INVALID_PARAMETER;
962         }
963
964         /*
965          * Point the return values at the real data including the RPC
966          * header. Just in case the caller wants it.
967          */
968         *ppdata = prs_data_p(current_pdu);
969         *pdata_len = current_pdu_len;
970
971         /* Ensure we have the correct type. */
972         switch (prhdr->pkt_type) {
973                 case RPC_ALTCONTRESP:
974                 case RPC_BINDACK:
975
976                         /* Alter context and bind ack share the same packet definitions. */
977                         break;
978
979
980                 case RPC_RESPONSE:
981                 {
982                         RPC_HDR_RESP rhdr_resp;
983                         uint8 ss_padding_len = 0;
984
985                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
986                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
987                                 return NT_STATUS_BUFFER_TOO_SMALL;
988                         }
989
990                         /* Here's where we deal with incoming sign/seal. */
991                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
992                                         current_pdu, &ss_padding_len);
993                         if (!NT_STATUS_IS_OK(ret)) {
994                                 return ret;
995                         }
996
997                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
998                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
999
1000                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
1001                                 return NT_STATUS_BUFFER_TOO_SMALL;
1002                         }
1003
1004                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
1005
1006                         /* Remember to remove the auth footer. */
1007                         if (prhdr->auth_len) {
1008                                 /* We've already done integer wrap tests on auth_len in
1009                                         cli_pipe_validate_rpc_response(). */
1010                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
1011                                         return NT_STATUS_BUFFER_TOO_SMALL;
1012                                 }
1013                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
1014                         }
1015
1016                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
1017                                 current_pdu_len, *pdata_len, ss_padding_len ));
1018
1019                         /*
1020                          * If this is the first reply, and the allocation hint is reasonable, try to
1021                          * set up the return_data parse_struct to the correct size.
1022                          */
1023
1024                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
1025                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
1026                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
1027                                                 "too large to allocate\n",
1028                                                 (unsigned int)rhdr_resp.alloc_hint ));
1029                                         return NT_STATUS_NO_MEMORY;
1030                                 }
1031                         }
1032
1033                         break;
1034                 }
1035
1036                 case RPC_BINDNACK:
1037                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
1038                                   "received from %s!\n",
1039                                   rpccli_pipe_txt(debug_ctx(), cli)));
1040                         /* Use this for now... */
1041                         return NT_STATUS_NETWORK_ACCESS_DENIED;
1042
1043                 case RPC_FAULT:
1044                 {
1045                         RPC_HDR_RESP rhdr_resp;
1046                         RPC_HDR_FAULT fault_resp;
1047
1048                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
1049                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
1050                                 return NT_STATUS_BUFFER_TOO_SMALL;
1051                         }
1052
1053                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
1054                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
1055                                 return NT_STATUS_BUFFER_TOO_SMALL;
1056                         }
1057
1058                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
1059                                   "code %s received from %s!\n",
1060                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
1061                                 rpccli_pipe_txt(debug_ctx(), cli)));
1062                         if (NT_STATUS_IS_OK(fault_resp.status)) {
1063                                 return NT_STATUS_UNSUCCESSFUL;
1064                         } else {
1065                                 return fault_resp.status;
1066                         }
1067                 }
1068
1069                 default:
1070                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
1071                                 "from %s!\n",
1072                                 (unsigned int)prhdr->pkt_type,
1073                                 rpccli_pipe_txt(debug_ctx(), cli)));
1074                         return NT_STATUS_INVALID_INFO_CLASS;
1075         }
1076
1077         if (prhdr->pkt_type != expected_pkt_type) {
1078                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
1079                           "got an unexpected RPC packet type - %u, not %u\n",
1080                         rpccli_pipe_txt(debug_ctx(), cli),
1081                         prhdr->pkt_type,
1082                         expected_pkt_type));
1083                 return NT_STATUS_INVALID_INFO_CLASS;
1084         }
1085
1086         /* Do this just before return - we don't want to modify any rpc header
1087            data before this point, as we may have needed to do cryptographic
1088            actions on it first. */
1089
1090         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
1091                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
1092                         "setting fragment first/last ON.\n"));
1093                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
1094         }
1095
1096         return NT_STATUS_OK;
1097 }
1098
1099 /****************************************************************************
1100  Ensure we eat the just processed pdu from the current_pdu prs_struct.
1101  Normally the frag_len and buffer size will match, but on the first trans
1102  reply there is a theoretical chance that buffer size > frag_len, so we must
1103  deal with that.
1104  ****************************************************************************/
1105
1106 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1107 {
1108         uint32 current_pdu_len = prs_data_size(current_pdu);
1109
1110         if (current_pdu_len < prhdr->frag_len) {
1111                 return NT_STATUS_BUFFER_TOO_SMALL;
1112         }
1113
1114         /* Common case. */
1115         if (current_pdu_len == (uint32)prhdr->frag_len) {
1116                 prs_mem_free(current_pdu);
1117                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1118                 /* Make current_pdu dynamic with no memory. */
1119                 prs_give_memory(current_pdu, 0, 0, True);
1120                 return NT_STATUS_OK;
1121         }
1122
1123         /*
1124          * Oh no ! More data in buffer than we processed in current pdu.
1125          * Cheat. Move the data down and shrink the buffer.
1126          */
1127
1128         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1129                         current_pdu_len - prhdr->frag_len);
1130
1131         /* Remember to set the read offset back to zero. */
1132         prs_set_offset(current_pdu, 0);
1133
1134         /* Shrink the buffer. */
1135         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1136                 return NT_STATUS_BUFFER_TOO_SMALL;
1137         }
1138
1139         return NT_STATUS_OK;
1140 }
1141
1142 /****************************************************************************
1143  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1144  Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1145
1146 struct cli_api_pipe_state {
1147         struct event_context *ev;
1148         struct rpc_pipe_client *cli;
1149         uint32_t max_rdata_len;
1150         uint8_t *rdata;
1151         uint32_t rdata_len;
1152 };
1153
1154 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1155 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1156 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
1157
1158 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1159                                            struct event_context *ev,
1160                                            struct rpc_pipe_client *cli,
1161                                            uint8_t *data, size_t data_len,
1162                                            uint32_t max_rdata_len)
1163 {
1164         struct async_req *result, *subreq;
1165         struct cli_api_pipe_state *state;
1166         NTSTATUS status;
1167
1168         result = async_req_new(mem_ctx);
1169         if (result == NULL) {
1170                 return NULL;
1171         }
1172         state = talloc(result, struct cli_api_pipe_state);
1173         if (state == NULL) {
1174                 goto fail;
1175         }
1176         result->private_data = state;
1177
1178         state->ev = ev;
1179         state->cli = cli;
1180         state->max_rdata_len = max_rdata_len;
1181
1182         if (state->max_rdata_len < RPC_HEADER_LEN) {
1183                 /*
1184                  * For an RPC reply we always need at least RPC_HEADER_LEN
1185                  * bytes. We check this here because we will receive
1186                  * RPC_HEADER_LEN bytes in cli_api_pipe_sock_send_done.
1187                  */
1188                 status = NT_STATUS_INVALID_PARAMETER;
1189                 goto post_status;
1190         }
1191
1192         if (cli->transport_type == NCACN_NP) {
1193
1194                 uint16_t setup[2];
1195                 SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
1196                 SSVAL(setup+1, 0, cli->trans.np.fnum);
1197
1198                 subreq = cli_trans_send(
1199                         state, ev, cli->trans.np.cli, SMBtrans,
1200                         "\\PIPE\\", 0, 0, 0, setup, 2, 0,
1201                         NULL, 0, 0, data, data_len, max_rdata_len);
1202                 if (subreq == NULL) {
1203                         status = NT_STATUS_NO_MEMORY;
1204                         goto post_status;
1205                 }
1206                 subreq->async.fn = cli_api_pipe_np_trans_done;
1207                 subreq->async.priv = result;
1208                 return result;
1209         }
1210
1211         if ((cli->transport_type == NCACN_IP_TCP)
1212             || (cli->transport_type == NCACN_UNIX_STREAM)) {
1213                 subreq = sendall_send(state, ev, cli->trans.sock.fd,
1214                                       data, data_len, 0);
1215                 if (subreq == NULL) {
1216                         status = NT_STATUS_NO_MEMORY;
1217                         goto post_status;
1218                 }
1219                 subreq->async.fn = cli_api_pipe_sock_send_done;
1220                 subreq->async.priv = result;
1221                 return result;
1222         }
1223
1224         status = NT_STATUS_INVALID_PARAMETER;
1225
1226  post_status:
1227         if (async_post_status(result, ev, status)) {
1228                 return result;
1229         }
1230  fail:
1231         TALLOC_FREE(result);
1232         return NULL;
1233 }
1234
1235 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1236 {
1237         struct async_req *req = talloc_get_type_abort(
1238                 subreq->async.priv, struct async_req);
1239         struct cli_api_pipe_state *state = talloc_get_type_abort(
1240                 req->private_data, struct cli_api_pipe_state);
1241         NTSTATUS status;
1242
1243         status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1244                                 &state->rdata, &state->rdata_len);
1245         TALLOC_FREE(subreq);
1246         if (!NT_STATUS_IS_OK(status)) {
1247                 async_req_error(req, status);
1248                 return;
1249         }
1250         async_req_done(req);
1251 }
1252
1253 static void cli_api_pipe_sock_send_done(struct async_req *subreq)
1254 {
1255         struct async_req *req = talloc_get_type_abort(
1256                 subreq->async.priv, struct async_req);
1257         struct cli_api_pipe_state *state = talloc_get_type_abort(
1258                 req->private_data, struct cli_api_pipe_state);
1259         NTSTATUS status;
1260
1261         status = sendall_recv(subreq);
1262         TALLOC_FREE(subreq);
1263         if (!NT_STATUS_IS_OK(status)) {
1264                 async_req_error(req, status);
1265                 return;
1266         }
1267
1268         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1269         if (async_req_nomem(state->rdata, req)) {
1270                 return;
1271         }
1272         state->rdata_len = RPC_HEADER_LEN;
1273
1274         subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
1275                               state->rdata, RPC_HEADER_LEN, 0);
1276         if (async_req_nomem(subreq, req)) {
1277                 return;
1278         }
1279         subreq->async.fn = cli_api_pipe_sock_read_done;
1280         subreq->async.priv = req;
1281 }
1282
1283 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1284 {
1285         struct async_req *req = talloc_get_type_abort(
1286                 subreq->async.priv, struct async_req);
1287         NTSTATUS status;
1288
1289         status = recvall_recv(subreq);
1290         TALLOC_FREE(subreq);
1291         if (!NT_STATUS_IS_OK(status)) {
1292                 async_req_error(req, status);
1293                 return;
1294         }
1295         async_req_done(req);
1296 }
1297
1298 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1299                                   uint8_t **prdata, uint32_t *prdata_len)
1300 {
1301         struct cli_api_pipe_state *state = talloc_get_type_abort(
1302                 req->private_data, struct cli_api_pipe_state);
1303         NTSTATUS status;
1304
1305         if (async_req_is_error(req, &status)) {
1306                 return status;
1307         }
1308
1309         *prdata = talloc_move(mem_ctx, &state->rdata);
1310         *prdata_len = state->rdata_len;
1311         return NT_STATUS_OK;
1312 }
1313
1314 /****************************************************************************
1315  Send data on an rpc pipe via trans. The prs_struct data must be the last
1316  pdu fragment of an NDR data stream.
1317
1318  Receive response data from an rpc pipe, which may be large...
1319
1320  Read the first fragment: unfortunately we have to use SMBtrans for the first
1321  bit, then SMBreadX for subsequent bits.
1322
1323  If the first fragment received wasn't also the last fragment, continue
1324  getting fragments until we _do_ receive the last fragment.
1325
1326  Request/Response PDUs look like the following...
1327
1328  |<------------------PDU len----------------------------------------------->|
1329  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1330
1331  +------------+-----------------+-------------+---------------+-------------+
1332  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1333  +------------+-----------------+-------------+---------------+-------------+
1334
1335  Where the presence of the AUTH_HDR and AUTH DATA is dependent on the
1336  signing & sealing being negotiated.
1337
1338  ****************************************************************************/
1339
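A minimal sketch of the payload arithmetic implied by the layout above, assuming the RPC_HDR fields and the *_LEN constants used elsewhere in this file (the sign/seal verify routines compute data_len the same way):

	static size_t frag_ndr_data_len(const RPC_HDR *hdr)
	{
		/* Strip the RPC header and the request/response header... */
		size_t len = hdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;

		if (hdr->auth_len != 0) {
			/* ...and, if auth was negotiated, the auth header and
			   the auth blob that sit at the end of the fragment. */
			len -= RPC_HDR_AUTH_LEN + hdr->auth_len;
		}
		return len;
	}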
1340 struct rpc_api_pipe_state {
1341         struct event_context *ev;
1342         struct rpc_pipe_client *cli;
1343         uint8_t expected_pkt_type;
1344
1345         prs_struct incoming_frag;
1346         struct rpc_hdr_info rhdr;
1347
1348         prs_struct incoming_pdu;        /* Incoming reply */
1349         uint32_t incoming_pdu_offset;
1350 };
1351
1352 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1353 {
1354         prs_mem_free(&state->incoming_frag);
1355         prs_mem_free(&state->incoming_pdu);
1356         return 0;
1357 }
1358
1359 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1360 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1361
1362 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1363                                            struct event_context *ev,
1364                                            struct rpc_pipe_client *cli,
1365                                            prs_struct *data, /* Outgoing PDU */
1366                                            uint8_t expected_pkt_type)
1367 {
1368         struct async_req *result, *subreq;
1369         struct rpc_api_pipe_state *state;
1370         uint16_t max_recv_frag;
1371         NTSTATUS status;
1372
1373         result = async_req_new(mem_ctx);
1374         if (result == NULL) {
1375                 return NULL;
1376         }
1377         state = talloc(result, struct rpc_api_pipe_state);
1378         if (state == NULL) {
1379                 goto fail;
1380         }
1381         result->private_data = state;
1382
1383         state->ev = ev;
1384         state->cli = cli;
1385         state->expected_pkt_type = expected_pkt_type;
1386         state->incoming_pdu_offset = 0;
1387
1388         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1389
1390         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1391         /* Make incoming_pdu dynamic with no memory. */
1392         prs_give_memory(&state->incoming_pdu, 0, 0, true);
1393
1394         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1395
1396         /*
1397          * Ensure we're not sending too much.
1398          */
1399         if (prs_offset(data) > cli->max_xmit_frag) {
1400                 status = NT_STATUS_INVALID_PARAMETER;
1401                 goto post_status;
1402         }
1403
1404         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1405
1406         max_recv_frag = cli->max_recv_frag;
1407
1408 #ifdef DEVELOPER
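	/* Torture the fragmentation code: advertise a tiny max receive
	 * fragment size (RPC header plus 10-41 bytes of data) so the
	 * server is forced to reply in many small fragments. */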
1409         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1410 #endif
1411
1412         subreq = cli_api_pipe_send(state, ev, cli, (uint8_t *)prs_data_p(data),
1413                                    prs_offset(data), max_recv_frag);
1414         if (subreq == NULL) {
1415                 status = NT_STATUS_NO_MEMORY;
1416                 goto post_status;
1417         }
1418         subreq->async.fn = rpc_api_pipe_trans_done;
1419         subreq->async.priv = result;
1420         return result;
1421
1422  post_status:
1423         if (async_post_status(result, ev, status)) {
1424                 return result;
1425         }
1426  fail:
1427         TALLOC_FREE(result);
1428         return NULL;
1429 }
1430
1431 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1432 {
1433         struct async_req *req = talloc_get_type_abort(
1434                 subreq->async.priv, struct async_req);
1435         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1436                 req->private_data, struct rpc_api_pipe_state);
1437         NTSTATUS status;
1438         uint8_t *rdata = NULL;
1439         uint32_t rdata_len = 0;
1440         char *rdata_copy;
1441
1442         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1443         TALLOC_FREE(subreq);
1444         if (!NT_STATUS_IS_OK(status)) {
1445                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1446                 async_req_error(req, status);
1447                 return;
1448         }
1449
1450         if (rdata == NULL) {
1451                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1452                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1453                 async_req_done(req);
1454                 return;
1455         }
1456
1457         /*
1458          * Give the memory received from cli_trans as dynamic to the current
1459          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1460          * :-(
1461          */
1462         rdata_copy = (char *)memdup(rdata, rdata_len);
1463         TALLOC_FREE(rdata);
1464         if (async_req_nomem(rdata_copy, req)) {
1465                 return;
1466         }
1467         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1468
1469         /* Ensure we have enough data for a pdu. */
1470         subreq = get_complete_frag_send(state, state->ev, state->cli,
1471                                         &state->rhdr, &state->incoming_frag);
1472         if (async_req_nomem(subreq, req)) {
1473                 return;
1474         }
1475         subreq->async.fn = rpc_api_pipe_got_pdu;
1476         subreq->async.priv = req;
1477 }
1478
1479 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1480 {
1481         struct async_req *req = talloc_get_type_abort(
1482                 subreq->async.priv, struct async_req);
1483         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1484                 req->private_data, struct rpc_api_pipe_state);
1485         NTSTATUS status;
1486         char *rdata = NULL;
1487         uint32_t rdata_len = 0;
1488
1489         status = get_complete_frag_recv(subreq);
1490         TALLOC_FREE(subreq);
1491         if (!NT_STATUS_IS_OK(status)) {
1492                 DEBUG(5, ("get_complete_frag failed: %s\n",
1493                           nt_errstr(status)));
1494                 async_req_error(req, status);
1495                 return;
1496         }
1497
1498         status = cli_pipe_validate_current_pdu(
1499                 state->cli, &state->rhdr, &state->incoming_frag,
1500                 state->expected_pkt_type, &rdata, &rdata_len,
1501                 &state->incoming_pdu);
1502
1503         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1504                   (unsigned)prs_data_size(&state->incoming_frag),
1505                   (unsigned)state->incoming_pdu_offset,
1506                   nt_errstr(status)));
1507
1508         if (!NT_STATUS_IS_OK(status)) {
1509                 async_req_error(req, status);
1510                 return;
1511         }
1512
1513         if ((state->rhdr.flags & RPC_FLG_FIRST)
1514             && (state->rhdr.pack_type[0] == 0)) {
1515                 /*
1516                  * Set the data type correctly for big-endian data on the
1517                  * first packet.
1518                  */
1519                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1520                           "big-endian.\n",
1521                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1522                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1523         }
1524         /*
1525          * Check endianness on subsequent packets.
1526          */
1527         if (state->incoming_frag.bigendian_data
1528             != state->incoming_pdu.bigendian_data) {
1529                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1530                          "%s\n",
1531                          state->incoming_pdu.bigendian_data?"big":"little",
1532                          state->incoming_frag.bigendian_data?"big":"little"));
1533                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1534                 return;
1535         }
1536
1537         /* Now copy the data portion out of the pdu into rbuf. */
1538         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1539                 async_req_error(req, NT_STATUS_NO_MEMORY);
1540                 return;
1541         }
1542
1543         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1544                rdata, (size_t)rdata_len);
1545         state->incoming_pdu_offset += rdata_len;
1546
1547         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1548                                             &state->incoming_frag);
1549         if (!NT_STATUS_IS_OK(status)) {
1550                 async_req_error(req, status);
1551                 return;
1552         }
1553
1554         if (state->rhdr.flags & RPC_FLG_LAST) {
1555                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1556                           rpccli_pipe_txt(debug_ctx(), state->cli),
1557                           (unsigned)prs_data_size(&state->incoming_pdu)));
1558                 async_req_done(req);
1559                 return;
1560         }
1561
1562         subreq = get_complete_frag_send(state, state->ev, state->cli,
1563                                         &state->rhdr, &state->incoming_frag);
1564         if (async_req_nomem(subreq, req)) {
1565                 return;
1566         }
1567         subreq->async.fn = rpc_api_pipe_got_pdu;
1568         subreq->async.priv = req;
1569 }
1570
1571 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1572                                   prs_struct *reply_pdu)
1573 {
1574         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1575                 req->private_data, struct rpc_api_pipe_state);
1576         NTSTATUS status;
1577
1578         if (async_req_is_error(req, &status)) {
1579                 return status;
1580         }
1581
1582         *reply_pdu = state->incoming_pdu;
1583         reply_pdu->mem_ctx = mem_ctx;
1584
1585         /*
1586          * Prevent state->incoming_pdu from being freed in
1587          * rpc_api_pipe_state_destructor()
1588          */
1589         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1590
1591         return NT_STATUS_OK;
1592 }
1593
1594 /*******************************************************************
1595  Creates krb5 auth bind.
1596  ********************************************************************/
1597
1598 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1599                                                 enum pipe_auth_level auth_level,
1600                                                 RPC_HDR_AUTH *pauth_out,
1601                                                 prs_struct *auth_data)
1602 {
1603 #ifdef HAVE_KRB5
1604         int ret;
1605         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1606         DATA_BLOB tkt = data_blob_null;
1607         DATA_BLOB tkt_wrapped = data_blob_null;
1608
1609         /* We may change the pad length before marshalling. */
1610         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1611
1612         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1613                 a->service_principal ));
1614
1615         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1616
1617         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1618                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1619
1620         if (ret) {
1621                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1622                         "failed with %s\n",
1623                         a->service_principal,
1624                         error_message(ret) ));
1625
1626                 data_blob_free(&tkt);
1627                 prs_mem_free(auth_data);
1628                 return NT_STATUS_INVALID_PARAMETER;
1629         }
1630
1631         /* wrap that up in a nice GSS-API wrapping */
1632         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1633
1634         data_blob_free(&tkt);
1635
1636         /* Auth len in the rpc header doesn't include auth_header. */
1637         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1638                 data_blob_free(&tkt_wrapped);
1639                 prs_mem_free(auth_data);
1640                 return NT_STATUS_NO_MEMORY;
1641         }
1642
1643         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1644         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1645
1646         data_blob_free(&tkt_wrapped);
1647         return NT_STATUS_OK;
1648 #else
1649         return NT_STATUS_INVALID_PARAMETER;
1650 #endif
1651 }
1652
1653 /*******************************************************************
1654  Creates SPNEGO NTLMSSP auth bind.
1655  ********************************************************************/
1656
1657 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1658                                                 enum pipe_auth_level auth_level,
1659                                                 RPC_HDR_AUTH *pauth_out,
1660                                                 prs_struct *auth_data)
1661 {
1662         NTSTATUS nt_status;
1663         DATA_BLOB null_blob = data_blob_null;
1664         DATA_BLOB request = data_blob_null;
1665         DATA_BLOB spnego_msg = data_blob_null;
1666
1667         /* We may change the pad length before marshalling. */
1668         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1669
1670         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1671         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1672                                         null_blob,
1673                                         &request);
1674
1675         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1676                 data_blob_free(&request);
1677                 prs_mem_free(auth_data);
1678                 return nt_status;
1679         }
1680
1681         /* Wrap this in SPNEGO. */
1682         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1683
1684         data_blob_free(&request);
1685
1686         /* Auth len in the rpc header doesn't include auth_header. */
1687         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1688                 data_blob_free(&spnego_msg);
1689                 prs_mem_free(auth_data);
1690                 return NT_STATUS_NO_MEMORY;
1691         }
1692
1693         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1694         dump_data(5, spnego_msg.data, spnego_msg.length);
1695
1696         data_blob_free(&spnego_msg);
1697         return NT_STATUS_OK;
1698 }
1699
1700 /*******************************************************************
1701  Creates NTLMSSP auth bind.
1702  ********************************************************************/
1703
1704 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1705                                                 enum pipe_auth_level auth_level,
1706                                                 RPC_HDR_AUTH *pauth_out,
1707                                                 prs_struct *auth_data)
1708 {
1709         NTSTATUS nt_status;
1710         DATA_BLOB null_blob = data_blob_null;
1711         DATA_BLOB request = data_blob_null;
1712
1713         /* We may change the pad length before marshalling. */
1714         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1715
1716         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1717         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1718                                         null_blob,
1719                                         &request);
1720
1721         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1722                 data_blob_free(&request);
1723                 prs_mem_free(auth_data);
1724                 return nt_status;
1725         }
1726
1727         /* Auth len in the rpc header doesn't include auth_header. */
1728         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1729                 data_blob_free(&request);
1730                 prs_mem_free(auth_data);
1731                 return NT_STATUS_NO_MEMORY;
1732         }
1733
1734         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1735         dump_data(5, request.data, request.length);
1736
1737         data_blob_free(&request);
1738         return NT_STATUS_OK;
1739 }
1740
1741 /*******************************************************************
1742  Creates schannel auth bind.
1743  ********************************************************************/
1744
1745 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1746                                                 enum pipe_auth_level auth_level,
1747                                                 RPC_HDR_AUTH *pauth_out,
1748                                                 prs_struct *auth_data)
1749 {
1750         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1751
1752         /* We may change the pad length before marshalling. */
1753         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1754
1755         /* Use lp_workgroup() if domain not specified */
1756
1757         if (!cli->auth->domain || !cli->auth->domain[0]) {
1758                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1759                 if (cli->auth->domain == NULL) {
1760                         return NT_STATUS_NO_MEMORY;
1761                 }
1762         }
1763
1764         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1765                                    global_myname());
1766
1767         /*
1768          * Now marshall the data into the auth parse_struct.
1769          */
1770
1771         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1772                                        &schannel_neg, auth_data, 0)) {
1773                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1774                 prs_mem_free(auth_data);
1775                 return NT_STATUS_NO_MEMORY;
1776         }
1777
1778         return NT_STATUS_OK;
1779 }
1780
1781 /*******************************************************************
1782  Creates the internals of a DCE/RPC bind request or alter context PDU.
1783  ********************************************************************/
1784
1785 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1786                                                 prs_struct *rpc_out, 
1787                                                 uint32 rpc_call_id,
1788                                                 const RPC_IFACE *abstract,
1789                                                 const RPC_IFACE *transfer,
1790                                                 RPC_HDR_AUTH *phdr_auth,
1791                                                 prs_struct *pauth_info)
1792 {
1793         RPC_HDR hdr;
1794         RPC_HDR_RB hdr_rb;
1795         RPC_CONTEXT rpc_ctx;
1796         uint16 auth_len = prs_offset(pauth_info);
1797         uint8 ss_padding_len = 0;
1798         uint16 frag_len = 0;
1799
1800         /* create the RPC context. */
1801         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1802
1803         /* create the bind request RPC_HDR_RB */
1804         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1805
1806         /* Start building the frag length. */
1807         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1808
1809         /* Do we need to pad ? */
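	/*
	 * Worked example (hypothetical sizes): with a 16 byte RPC header and
	 * a 72 byte bind body, data_len == 88 and 88 % 8 == 0, so no padding
	 * is needed; with a 75 byte body, data_len == 91 and ss_padding_len
	 * becomes 8 - (91 % 8) == 5, keeping the auth trailer 8-byte aligned.
	 */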
1810         if (auth_len) {
1811                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1812                 if (data_len % 8) {
1813                         ss_padding_len = 8 - (data_len % 8);
1814                         phdr_auth->auth_pad_len = ss_padding_len;
1815                 }
1816                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1817         }
1818
1819         /* Create the request RPC_HDR */
1820         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1821
1822         /* Marshall the RPC header */
1823         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1824                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1825                 return NT_STATUS_NO_MEMORY;
1826         }
1827
1828         /* Marshall the bind request data */
1829         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1830                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1831                 return NT_STATUS_NO_MEMORY;
1832         }
1833
1834         /*
1835          * Grow the outgoing buffer to store any auth info.
1836          */
1837
1838         if(auth_len != 0) {
1839                 if (ss_padding_len) {
1840                         char pad[8];
1841                         memset(pad, '\0', 8);
1842                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1843                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1844                                 return NT_STATUS_NO_MEMORY;
1845                         }
1846                 }
1847
1848                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1849                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1850                         return NT_STATUS_NO_MEMORY;
1851                 }
1852
1853
1854                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1855                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1856                         return NT_STATUS_NO_MEMORY;
1857                 }
1858         }
1859
1860         return NT_STATUS_OK;
1861 }
1862
1863 /*******************************************************************
1864  Creates a DCE/RPC bind request.
1865  ********************************************************************/
1866
1867 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1868                                 prs_struct *rpc_out, 
1869                                 uint32 rpc_call_id,
1870                                 const RPC_IFACE *abstract,
1871                                 const RPC_IFACE *transfer,
1872                                 enum pipe_auth_type auth_type,
1873                                 enum pipe_auth_level auth_level)
1874 {
1875         RPC_HDR_AUTH hdr_auth;
1876         prs_struct auth_info;
1877         NTSTATUS ret = NT_STATUS_OK;
1878
1879         ZERO_STRUCT(hdr_auth);
1880         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1881                 return NT_STATUS_NO_MEMORY;
1882
1883         switch (auth_type) {
1884                 case PIPE_AUTH_TYPE_SCHANNEL:
1885                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1886                         if (!NT_STATUS_IS_OK(ret)) {
1887                                 prs_mem_free(&auth_info);
1888                                 return ret;
1889                         }
1890                         break;
1891
1892                 case PIPE_AUTH_TYPE_NTLMSSP:
1893                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1894                         if (!NT_STATUS_IS_OK(ret)) {
1895                                 prs_mem_free(&auth_info);
1896                                 return ret;
1897                         }
1898                         break;
1899
1900                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1901                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1902                         if (!NT_STATUS_IS_OK(ret)) {
1903                                 prs_mem_free(&auth_info);
1904                                 return ret;
1905                         }
1906                         break;
1907
1908                 case PIPE_AUTH_TYPE_KRB5:
1909                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1910                         if (!NT_STATUS_IS_OK(ret)) {
1911                                 prs_mem_free(&auth_info);
1912                                 return ret;
1913                         }
1914                         break;
1915
1916                 case PIPE_AUTH_TYPE_NONE:
1917                         break;
1918
1919                 default:
1920                         /* "Can't" happen. */
1921                         return NT_STATUS_INVALID_INFO_CLASS;
1922         }
1923
1924         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1925                                                 rpc_out, 
1926                                                 rpc_call_id,
1927                                                 abstract,
1928                                                 transfer,
1929                                                 &hdr_auth,
1930                                                 &auth_info);
1931
1932         prs_mem_free(&auth_info);
1933         return ret;
1934 }
1935
1936 /*******************************************************************
1937  Create and add the NTLMSSP sign/seal auth header and data.
1938  ********************************************************************/
1939
1940 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1941                                         RPC_HDR *phdr,
1942                                         uint32 ss_padding_len,
1943                                         prs_struct *outgoing_pdu)
1944 {
1945         RPC_HDR_AUTH auth_info;
1946         NTSTATUS status;
1947         DATA_BLOB auth_blob = data_blob_null;
1948         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1949
1950         if (!cli->auth->a_u.ntlmssp_state) {
1951                 return NT_STATUS_INVALID_PARAMETER;
1952         }
1953
1954         /* Init and marshall the auth header. */
1955         init_rpc_hdr_auth(&auth_info,
1956                         map_pipe_auth_type_to_rpc_auth_type(
1957                                 cli->auth->auth_type),
1958                         cli->auth->auth_level,
1959                         ss_padding_len,
1960                         1 /* context id. */);
1961
1962         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1963                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1964                 data_blob_free(&auth_blob);
1965                 return NT_STATUS_NO_MEMORY;
1966         }
1967
1968         switch (cli->auth->auth_level) {
1969                 case PIPE_AUTH_LEVEL_PRIVACY:
1970                         /* Data portion is encrypted. */
1971                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1972                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1973                                         data_and_pad_len,
1974                                         (unsigned char *)prs_data_p(outgoing_pdu),
1975                                         (size_t)prs_offset(outgoing_pdu),
1976                                         &auth_blob);
1977                         if (!NT_STATUS_IS_OK(status)) {
1978                                 data_blob_free(&auth_blob);
1979                                 return status;
1980                         }
1981                         break;
1982
1983                 case PIPE_AUTH_LEVEL_INTEGRITY:
1984                         /* Data is signed. */
1985                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1986                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1987                                         data_and_pad_len,
1988                                         (unsigned char *)prs_data_p(outgoing_pdu),
1989                                         (size_t)prs_offset(outgoing_pdu),
1990                                         &auth_blob);
1991                         if (!NT_STATUS_IS_OK(status)) {
1992                                 data_blob_free(&auth_blob);
1993                                 return status;
1994                         }
1995                         break;
1996
1997                 default:
1998                         /* Can't happen. */
1999                         smb_panic("bad auth level");
2000                         /* Notreached. */
2001                         return NT_STATUS_INVALID_PARAMETER;
2002         }
2003
2004         /* Finally marshall the blob. */
2005
2006         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
2007                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
2008                         (unsigned int)NTLMSSP_SIG_SIZE));
2009                 data_blob_free(&auth_blob);
2010                 return NT_STATUS_NO_MEMORY;
2011         }
2012
2013         data_blob_free(&auth_blob);
2014         return NT_STATUS_OK;
2015 }
2016
2017 /*******************************************************************
2018  Create and add the schannel sign/seal auth header and data.
2019  ********************************************************************/
2020
2021 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
2022                                         RPC_HDR *phdr,
2023                                         uint32 ss_padding_len,
2024                                         prs_struct *outgoing_pdu)
2025 {
2026         RPC_HDR_AUTH auth_info;
2027         RPC_AUTH_SCHANNEL_CHK verf;
2028         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
2029         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
2030         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
2031
2032         if (!sas) {
2033                 return NT_STATUS_INVALID_PARAMETER;
2034         }
2035
2036         /* Init and marshall the auth header. */
2037         init_rpc_hdr_auth(&auth_info,
2038                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2039                         cli->auth->auth_level,
2040                         ss_padding_len,
2041                         1 /* context id. */);
2042
2043         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2044                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2045                 return NT_STATUS_NO_MEMORY;
2046         }
2047
2048         switch (cli->auth->auth_level) {
2049                 case PIPE_AUTH_LEVEL_PRIVACY:
2050                 case PIPE_AUTH_LEVEL_INTEGRITY:
2051                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
2052                                 sas->seq_num));
2053
2054                         schannel_encode(sas,
2055                                         cli->auth->auth_level,
2056                                         SENDER_IS_INITIATOR,
2057                                         &verf,
2058                                         data_p,
2059                                         data_and_pad_len);
2060
2061                         sas->seq_num++;
2062                         break;
2063
2064                 default:
2065                         /* Can't happen. */
2066                         smb_panic("bad auth level");
2067                         /* Notreached. */
2068                         return NT_STATUS_INVALID_PARAMETER;
2069         }
2070
2071         /* Finally marshall the blob. */
2072         smb_io_rpc_auth_schannel_chk("",
2073                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2074                         &verf,
2075                         outgoing_pdu,
2076                         0);
2077
2078         return NT_STATUS_OK;
2079 }
2080
2081 /*******************************************************************
2082  Calculate how much data we're going to send in this packet and work
2083  out any sign/seal padding length.
2084  ********************************************************************/
2085
2086 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
2087                                         uint32 data_left,
2088                                         uint16 *p_frag_len,
2089                                         uint16 *p_auth_len,
2090                                         uint32 *p_ss_padding)
2091 {
2092         uint32 data_space, data_len;
2093
2094 #ifdef DEVELOPER
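	/* Torture the fragmentation code: randomly halve the amount of data
	 * we offer to send in this fragment, so requests are split into more
	 * PDUs than strictly necessary. */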
2095         if ((data_left > 0) && (sys_random() % 2)) {
2096                 data_left = MAX(data_left/2, 1);
2097         }
2098 #endif
2099
2100         switch (cli->auth->auth_level) {
2101                 case PIPE_AUTH_LEVEL_NONE:
2102                 case PIPE_AUTH_LEVEL_CONNECT:
2103                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
2104                         data_len = MIN(data_space, data_left);
2105                         *p_ss_padding = 0;
2106                         *p_auth_len = 0;
2107                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
2108                         return data_len;
2109
2110                 case PIPE_AUTH_LEVEL_INTEGRITY:
2111                 case PIPE_AUTH_LEVEL_PRIVACY:
2112                         /* Treat the same for all authenticated rpc requests. */
2113                         switch(cli->auth->auth_type) {
2114                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2115                                 case PIPE_AUTH_TYPE_NTLMSSP:
2116                                         *p_auth_len = NTLMSSP_SIG_SIZE;
2117                                         break;
2118                                 case PIPE_AUTH_TYPE_SCHANNEL:
2119                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2120                                         break;
2121                                 default:
2122                                         smb_panic("bad auth type");
2123                                         break;
2124                         }
2125
2126                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2127                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2128
2129                         data_len = MIN(data_space, data_left);
2130                         *p_ss_padding = 0;
2131                         if (data_len % 8) {
2132                                 *p_ss_padding = 8 - (data_len % 8);
2133                         }
2134                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2135                                         data_len + *p_ss_padding +              /* data plus padding. */
2136                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2137                         return data_len;
2138
2139                 default:
2140                         smb_panic("bad auth level");
2141                         /* Notreached. */
2142                         return 0;
2143         }
2144 }
2145
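/*
 * Worked example (assumed sizes, for illustration only): with the usual
 * DCE/RPC header sizes (16 byte RPC header, 8 byte request header, 8 byte
 * auth header), a 16 byte NTLMSSP signature and a max_xmit_frag of 4280,
 * data_space is 4280 - 16 - 8 - 8 - 16 = 4232.  A request with at least
 * that much data left sends data_len == 4232 (already 8-aligned, so no
 * sign/seal padding) and frag_len == 4280.
 */
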
2146 /*******************************************************************
2147  External interface.
2148  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2149  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2150  and deals with signing/sealing details.
2151  ********************************************************************/
2152
2153 struct rpc_api_pipe_req_state {
2154         struct event_context *ev;
2155         struct rpc_pipe_client *cli;
2156         uint8_t op_num;
2157         uint32_t call_id;
2158         prs_struct *req_data;
2159         uint32_t req_data_sent;
2160         prs_struct outgoing_frag;
2161         prs_struct reply_pdu;
2162 };
2163
2164 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2165 {
2166         prs_mem_free(&s->outgoing_frag);
2167         prs_mem_free(&s->reply_pdu);
2168         return 0;
2169 }
2170
2171 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2172 static void rpc_api_pipe_req_done(struct async_req *subreq);
2173 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2174                                   bool *is_last_frag);
2175
2176 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2177                                         struct event_context *ev,
2178                                         struct rpc_pipe_client *cli,
2179                                         uint8_t op_num,
2180                                         prs_struct *req_data)
2181 {
2182         struct async_req *result, *subreq;
2183         struct rpc_api_pipe_req_state *state;
2184         NTSTATUS status;
2185         bool is_last_frag;
2186
2187         result = async_req_new(mem_ctx);
2188         if (result == NULL) {
2189                 return NULL;
2190         }
2191         state = talloc(result, struct rpc_api_pipe_req_state);
2192         if (state == NULL) {
2193                 goto fail;
2194         }
2195         result->private_data = state;
2196
2197         state->ev = ev;
2198         state->cli = cli;
2199         state->op_num = op_num;
2200         state->req_data = req_data;
2201         state->req_data_sent = 0;
2202         state->call_id = get_rpc_call_id();
2203
2204         if (cli->max_xmit_frag
2205             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2206                 /* Server is screwed up ! */
2207                 status = NT_STATUS_INVALID_PARAMETER;
2208                 goto post_status;
2209         }
2210
2211         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2212
2213         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2214                       state, MARSHALL)) {
2215                 status = NT_STATUS_NO_MEMORY;
2216                 goto post_status;
2217         }
2218
2219         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2220
2221         status = prepare_next_frag(state, &is_last_frag);
2222         if (!NT_STATUS_IS_OK(status)) {
2223                 goto post_status;
2224         }
2225
2226         if (is_last_frag) {
2227                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2228                                            &state->outgoing_frag,
2229                                            RPC_RESPONSE);
2230                 if (subreq == NULL) {
2231                         status = NT_STATUS_NO_MEMORY;
2232                         goto post_status;
2233                 }
2234                 subreq->async.fn = rpc_api_pipe_req_done;
2235                 subreq->async.priv = result;
2236         } else {
2237                 subreq = rpc_write_send(state, ev, cli,
2238                                         prs_data_p(&state->outgoing_frag),
2239                                         prs_offset(&state->outgoing_frag));
2240                 if (subreq == NULL) {
2241                         status = NT_STATUS_NO_MEMORY;
2242                         goto post_status;
2243                 }
2244                 subreq->async.fn = rpc_api_pipe_req_write_done;
2245                 subreq->async.priv = result;
2246         }
2247         return result;
2248
2249  post_status:
2250         if (async_post_status(result, ev, status)) {
2251                 return result;
2252         }
2253  fail:
2254         TALLOC_FREE(result);
2255         return NULL;
2256 }
2257
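/*******************************************************************
 Marshall the next request fragment into state->outgoing_frag: the RPC
 header, the request header, the next slice of the payload, any sign/seal
 padding, and (if an auth type is negotiated) the auth footer.
 ********************************************************************/
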
2258 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2259                                   bool *is_last_frag)
2260 {
2261         RPC_HDR hdr;
2262         RPC_HDR_REQ hdr_req;
2263         uint32_t data_sent_thistime;
2264         uint16_t auth_len;
2265         uint16_t frag_len;
2266         uint8_t flags = 0;
2267         uint32_t ss_padding;
2268         uint32_t data_left;
2269         char pad[8] = { 0, };
2270         NTSTATUS status;
2271
2272         data_left = prs_offset(state->req_data) - state->req_data_sent;
2273
2274         data_sent_thistime = calculate_data_len_tosend(
2275                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2276
2277         if (state->req_data_sent == 0) {
2278                 flags = RPC_FLG_FIRST;
2279         }
2280
2281         if (data_sent_thistime == data_left) {
2282                 flags |= RPC_FLG_LAST;
2283         }
2284
2285         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2286                 return NT_STATUS_NO_MEMORY;
2287         }
2288
2289         /* Create and marshall the header and request header. */
2290         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2291                      auth_len);
2292
2293         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2294                 return NT_STATUS_NO_MEMORY;
2295         }
2296
2297         /* Create the rpc request RPC_HDR_REQ */
2298         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2299                          state->op_num);
2300
2301         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2302                                 &state->outgoing_frag, 0)) {
2303                 return NT_STATUS_NO_MEMORY;
2304         }
2305
2306         /* Copy in the data, plus any ss padding. */
2307         if (!prs_append_some_prs_data(&state->outgoing_frag,
2308                                       state->req_data, state->req_data_sent,
2309                                       data_sent_thistime)) {
2310                 return NT_STATUS_NO_MEMORY;
2311         }
2312
2313         /* Copy the sign/seal padding data. */
2314         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2315                 return NT_STATUS_NO_MEMORY;
2316         }
2317
2318         /* Generate any auth sign/seal and add the auth footer. */
2319         switch (state->cli->auth->auth_type) {
2320         case PIPE_AUTH_TYPE_NONE:
2321                 status = NT_STATUS_OK;
2322                 break;
2323         case PIPE_AUTH_TYPE_NTLMSSP:
2324         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2325                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2326                                                  &state->outgoing_frag);
2327                 break;
2328         case PIPE_AUTH_TYPE_SCHANNEL:
2329                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2330                                                   &state->outgoing_frag);
2331                 break;
2332         default:
2333                 status = NT_STATUS_INVALID_PARAMETER;
2334                 break;
2335         }
2336
2337         state->req_data_sent += data_sent_thistime;
2338         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2339
2340         return status;
2341 }
2342
2343 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2344 {
2345         struct async_req *req = talloc_get_type_abort(
2346                 subreq->async.priv, struct async_req);
2347         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2348                 req->private_data, struct rpc_api_pipe_req_state);
2349         NTSTATUS status;
2350         bool is_last_frag;
2351
2352         status = rpc_write_recv(subreq);
2353         TALLOC_FREE(subreq);
2354         if (!NT_STATUS_IS_OK(status)) {
2355                 async_req_error(req, status);
2356                 return;
2357         }
2358
2359         status = prepare_next_frag(state, &is_last_frag);
2360         if (!NT_STATUS_IS_OK(status)) {
2361                 async_req_error(req, status);
2362                 return;
2363         }
2364
2365         if (is_last_frag) {
2366                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2367                                            &state->outgoing_frag,
2368                                            RPC_RESPONSE);
2369                 if (async_req_nomem(subreq, req)) {
2370                         return;
2371                 }
2372                 subreq->async.fn = rpc_api_pipe_req_done;
2373                 subreq->async.priv = req;
2374         } else {
2375                 subreq = rpc_write_send(state, state->ev, state->cli,
2376                                         prs_data_p(&state->outgoing_frag),
2377                                         prs_offset(&state->outgoing_frag));
2378                 if (async_req_nomem(subreq, req)) {
2379                         return;
2380                 }
2381                 subreq->async.fn = rpc_api_pipe_req_write_done;
2382                 subreq->async.priv = req;
2383         }
2384 }
2385
2386 static void rpc_api_pipe_req_done(struct async_req *subreq)
2387 {
2388         struct async_req *req = talloc_get_type_abort(
2389                 subreq->async.priv, struct async_req);
2390         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2391                 req->private_data, struct rpc_api_pipe_req_state);
2392         NTSTATUS status;
2393
2394         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2395         TALLOC_FREE(subreq);
2396         if (!NT_STATUS_IS_OK(status)) {
2397                 async_req_error(req, status);
2398                 return;
2399         }
2400         async_req_done(req);
2401 }
2402
2403 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2404                                prs_struct *reply_pdu)
2405 {
2406         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2407                 req->private_data, struct rpc_api_pipe_req_state);
2408         NTSTATUS status;
2409
2410         if (async_req_is_error(req, &status)) {
2411                 return status;
2412         }
2413
2414         *reply_pdu = state->reply_pdu;
2415         reply_pdu->mem_ctx = mem_ctx;
2416
2417         /*
2418          * Prevent state->reply_pdu from being freed in
2419          * rpc_api_pipe_req_state_destructor()
2420          */
2421         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2422
2423         return NT_STATUS_OK;
2424 }
2425
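/*******************************************************************
 Synchronous wrapper: drives rpc_api_pipe_req_send()/_recv() on a
 private event context until the request completes.
 ********************************************************************/
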
2426 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2427                         uint8 op_num,
2428                         prs_struct *in_data,
2429                         prs_struct *out_data)
2430 {
2431         TALLOC_CTX *frame = talloc_stackframe();
2432         struct event_context *ev;
2433         struct async_req *req;
2434         NTSTATUS status = NT_STATUS_NO_MEMORY;
2435
2436         ev = event_context_init(frame);
2437         if (ev == NULL) {
2438                 goto fail;
2439         }
2440
2441         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2442         if (req == NULL) {
2443                 goto fail;
2444         }
2445
2446         while (req->state < ASYNC_REQ_DONE) {
2447                 event_loop_once(ev);
2448         }
2449
2450         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2451  fail:
2452         TALLOC_FREE(frame);
2453         return status;
2454 }
2455
2456 #if 0
2457 /****************************************************************************
2458  Set the handle state.
2459 ****************************************************************************/
2460
2461 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2462                                    const char *pipe_name, uint16 device_state)
2463 {
2464         bool state_set = False;
2465         char param[2];
2466         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2467         char *rparam = NULL;
2468         char *rdata = NULL;
2469         uint32 rparam_len, rdata_len;
2470
2471         if (pipe_name == NULL)
2472                 return False;
2473
2474         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2475                  cli->fnum, pipe_name, device_state));
2476
2477         /* create parameters: device state */
2478         SSVAL(param, 0, device_state);
2479
2480         /* create setup parameters. */
2481         setup[0] = 0x0001; 
2482         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2483
2484         /* send the data on \PIPE\ */
2485         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2486                     setup, 2, 0,                /* setup, length, max */
2487                     param, 2, 0,                /* param, length, max */
2488                     NULL, 0, 1024,              /* data, length, max */
2489                     &rparam, &rparam_len,        /* return param, length */
2490                     &rdata, &rdata_len))         /* return data, length */
2491         {
2492                 DEBUG(5, ("Set Handle state: return OK\n"));
2493                 state_set = True;
2494         }
2495
2496         SAFE_FREE(rparam);
2497         SAFE_FREE(rdata);
2498
2499         return state_set;
2500 }
2501 #endif
2502
2503 /****************************************************************************
2504  Check the rpc bind acknowledge response.
2505 ****************************************************************************/
2506
2507 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2508 {
2509         if ( hdr_ba->addr.len == 0) {
2510                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2511         }
2512
2513         /* check the transfer syntax */
2514         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2515              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2516                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2517                 return False;
2518         }
2519
2520         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2521                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2522                           hdr_ba->res.num_results, hdr_ba->res.reason));
		return False;
2523         }
2524
2525         DEBUG(5,("check_bind_response: accepted!\n"));
2526         return True;
2527 }
2528
2529 /*******************************************************************
2530  Creates a DCE/RPC bind authentication response.
2531  This is the packet that is sent back to the server once we
2532  have received a BIND-ACK, to finish the third leg of
2533  the authentication handshake.
2534  ********************************************************************/
2535
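/*
 * Sketch of the exchange (e.g. for plain NTLMSSP): the client's BIND carries
 * the NEGOTIATE token, the server's BIND-ACK carries the CHALLENGE, and this
 * AUTH3 PDU carries the final AUTHENTICATE token.  Unlike bind/alter-context,
 * the AUTH3 PDU expects no reply from the server.
 */
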
2536 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2537                                 uint32 rpc_call_id,
2538                                 enum pipe_auth_type auth_type,
2539                                 enum pipe_auth_level auth_level,
2540                                 DATA_BLOB *pauth_blob,
2541                                 prs_struct *rpc_out)
2542 {
2543         RPC_HDR hdr;
2544         RPC_HDR_AUTH hdr_auth;
2545         uint32 pad = 0;
2546
2547         /* Create the request RPC_HDR */
2548         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2549                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2550                      pauth_blob->length );
2551
2552         /* Marshall it. */
2553         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2554                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2555                 return NT_STATUS_NO_MEMORY;
2556         }
2557
2558         /*
2559                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2560                 about padding - shouldn't this pad to length 8 ? JRA.
2561         */
2562
2563         /* 4 bytes padding. */
2564         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2565                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2566                 return NT_STATUS_NO_MEMORY;
2567         }
2568
2569         /* Create the request RPC_HDR_AUTH */
2570         init_rpc_hdr_auth(&hdr_auth,
2571                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2572                         auth_level, 0, 1);
2573
2574         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2575                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2576                 return NT_STATUS_NO_MEMORY;
2577         }
2578
2579         /*
2580          * Append the auth data to the outgoing buffer.
2581          */
2582
2583         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2584                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2585                 return NT_STATUS_NO_MEMORY;
2586         }
2587
2588         return NT_STATUS_OK;
2589 }
2590
2591 /*******************************************************************
2592  Creates a DCE/RPC bind alter context authentication request which
2593  may contain a spnego auth blob.
2594  ********************************************************************/
2595
2596 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2597                                         const RPC_IFACE *abstract,
2598                                         const RPC_IFACE *transfer,
2599                                         enum pipe_auth_level auth_level,
2600                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2601                                         prs_struct *rpc_out)
2602 {
2603         RPC_HDR_AUTH hdr_auth;
2604         prs_struct auth_info;
2605         NTSTATUS ret = NT_STATUS_OK;
2606
2607         ZERO_STRUCT(hdr_auth);
2608         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2609                 return NT_STATUS_NO_MEMORY;
2610
2611         /* We may change the pad length before marshalling. */
2612         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2613
2614         if (pauth_blob->length) {
2615                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2616                         prs_mem_free(&auth_info);
2617                         return NT_STATUS_NO_MEMORY;
2618                 }
2619         }
2620
2621         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2622                                                 rpc_out, 
2623                                                 rpc_call_id,
2624                                                 abstract,
2625                                                 transfer,
2626                                                 &hdr_auth,
2627                                                 &auth_info);
2628         prs_mem_free(&auth_info);
2629         return ret;
2630 }
2631
2632 /****************************************************************************
2633  Do an rpc bind.
2634 ****************************************************************************/
2635
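/*
 * Overview of the bind state machine below: marshall a BIND PDU (including
 * any auth negotiate token), send it, and handle the BIND-ACK in
 * rpc_pipe_bind_step_one_done().  Depending on the auth type the handshake
 * either ends there, continues with an AUTH3 PDU (plain NTLMSSP), or goes
 * through an SPNEGO NTLMSSP alter-context exchange.
 */
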
2636 struct rpc_pipe_bind_state {
2637         struct event_context *ev;
2638         struct rpc_pipe_client *cli;
2639         prs_struct rpc_out;
2640         uint32_t rpc_call_id;
2641 };
2642
2643 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2644 {
2645         prs_mem_free(&state->rpc_out);
2646         return 0;
2647 }
2648
2649 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2650 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2651                                            struct rpc_pipe_bind_state *state,
2652                                            struct rpc_hdr_info *phdr,
2653                                            prs_struct *reply_pdu);
2654 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2655 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2656                                                     struct rpc_pipe_bind_state *state,
2657                                                     struct rpc_hdr_info *phdr,
2658                                                     prs_struct *reply_pdu);
2659 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2660
2661 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2662                                      struct event_context *ev,
2663                                      struct rpc_pipe_client *cli,
2664                                      struct cli_pipe_auth_data *auth)
2665 {
2666         struct async_req *result, *subreq;
2667         struct rpc_pipe_bind_state *state;
2668         NTSTATUS status;
2669
2670         result = async_req_new(mem_ctx);
2671         if (result == NULL) {
2672                 return NULL;
2673         }
2674         state = talloc(result, struct rpc_pipe_bind_state);
2675         if (state == NULL) {
2676                 goto fail;
2677         }
2678         result->private_data = state;
2679
2680         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2681                 rpccli_pipe_txt(debug_ctx(), cli),
2682                 (unsigned int)auth->auth_type,
2683                 (unsigned int)auth->auth_level ));
2684
2685         state->ev = ev;
2686         state->cli = cli;
2687         state->rpc_call_id = get_rpc_call_id();
2688
2689         prs_init_empty(&state->rpc_out, state, MARSHALL);
2690         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2691
2692         cli->auth = talloc_move(cli, &auth);
2693
2694         /* Marshall the outgoing data. */
2695         status = create_rpc_bind_req(cli, &state->rpc_out,
2696                                      state->rpc_call_id,
2697                                      &cli->abstract_syntax,
2698                                      &cli->transfer_syntax,
2699                                      cli->auth->auth_type,
2700                                      cli->auth->auth_level);
2701
2702         if (!NT_STATUS_IS_OK(status)) {
2703                 goto post_status;
2704         }
2705
2706         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2707                                    RPC_BINDACK);
2708         if (subreq == NULL) {
2709                 status = NT_STATUS_NO_MEMORY;
2710                 goto post_status;
2711         }
2712         subreq->async.fn = rpc_pipe_bind_step_one_done;
2713         subreq->async.priv = result;
2714         return result;
2715
2716  post_status:
2717         if (async_post_status(result, ev, status)) {
2718                 return result;
2719         }
2720  fail:
2721         TALLOC_FREE(result);
2722         return NULL;
2723 }
2724
2725 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2726 {
2727         struct async_req *req = talloc_get_type_abort(
2728                 subreq->async.priv, struct async_req);
2729         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2730                 req->private_data, struct rpc_pipe_bind_state);
2731         prs_struct reply_pdu;
2732         struct rpc_hdr_info hdr;
2733         struct rpc_hdr_ba_info hdr_ba;
2734         NTSTATUS status;
2735
2736         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2737         TALLOC_FREE(subreq);
2738         if (!NT_STATUS_IS_OK(status)) {
2739                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2740                           rpccli_pipe_txt(debug_ctx(), state->cli),
2741                           nt_errstr(status)));
2742                 async_req_error(req, status);
2743                 return;
2744         }
2745
2746         /* Unmarshall the RPC header */
2747         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2748                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2749                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2750                 return;
2751         }
2752
2753         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2754                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2755                           "RPC_HDR_BA.\n"));
2756                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2757                 return;
2758         }
2759
2760         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2761                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2762                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2763                 return;
2764         }
2765
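        /*
         * Remember the fragment sizes negotiated in the bind_ack; they cap
         * the size of the PDU fragments subsequently exchanged on this pipe.
         */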
2766         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2767         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2768
2769         /*
2770          * For authenticated binds we may need to do 3 or 4 leg binds.
2771          */
2772
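        /*
         * Sketch of the PDU sequences the cases below implement:
         *
         *   NONE, SCHANNEL:   BIND -> BIND_ACK                       (2 legs)
         *   NTLMSSP:          BIND -> BIND_ACK -> AUTH3              (3 legs,
         *                     the AUTH3 PDU gets no reply)
         *   SPNEGO_NTLMSSP:   BIND -> BIND_ACK -> ALTER_CONTEXT
         *                                      -> ALTER_CONTEXT_RESP (4 legs)
         */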
2773         switch(state->cli->auth->auth_type) {
2774
2775         case PIPE_AUTH_TYPE_NONE:
2776         case PIPE_AUTH_TYPE_SCHANNEL:
2777                 /* Bind complete. */
2778                 async_req_done(req);
2779                 break;
2780
2781         case PIPE_AUTH_TYPE_NTLMSSP:
2782                 /* Need to send AUTH3 packet - no reply. */
2783                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2784                                                     &reply_pdu);
2785                 if (!NT_STATUS_IS_OK(status)) {
2786                         async_req_error(req, status);
2787                 }
2788                 break;
2789
2790         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2791                 /* Need to send alter context request and reply. */
2792                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2793                                                              &reply_pdu);
2794                 if (!NT_STATUS_IS_OK(status)) {
2795                         async_req_error(req, status);
2796                 }
2797                 break;
2798
2799         case PIPE_AUTH_TYPE_KRB5:
2800                 /* Not handled here yet - fall through to the error case. */
2801
2802         default:
2803                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2804                          (unsigned int)state->cli->auth->auth_type));
2805                 async_req_error(req, NT_STATUS_INTERNAL_ERROR);
2806         }
2807 }
2808
2809 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2810                                            struct rpc_pipe_bind_state *state,
2811                                            struct rpc_hdr_info *phdr,
2812                                            prs_struct *reply_pdu)
2813 {
2814         DATA_BLOB server_response = data_blob_null;
2815         DATA_BLOB client_reply = data_blob_null;
2816         struct rpc_hdr_auth_info hdr_auth;
2817         struct async_req *subreq;
2818         NTSTATUS status;
2819
2820         if ((phdr->auth_len == 0)
2821             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2822                 return NT_STATUS_INVALID_PARAMETER;
2823         }
2824
2825         if (!prs_set_offset(
2826                     reply_pdu,
2827                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2828                 return NT_STATUS_INVALID_PARAMETER;
2829         }
2830
2831         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2832                 return NT_STATUS_INVALID_PARAMETER;
2833         }
2834
2835         /* TODO - check auth_type/auth_level match. */
2836
2837         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2838         prs_copy_data_out((char *)server_response.data, reply_pdu,
2839                           phdr->auth_len);
2840
2841         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2842                                 server_response, &client_reply);
2843
2844         if (!NT_STATUS_IS_OK(status)) {
2845                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2846                           "blob failed: %s.\n", nt_errstr(status)));
2847                 return status;
2848         }
2849
2850         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2851
2852         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2853                                        state->cli->auth->auth_type,
2854                                        state->cli->auth->auth_level,
2855                                        &client_reply, &state->rpc_out);
2856         data_blob_free(&client_reply);
2857
2858         if (!NT_STATUS_IS_OK(status)) {
2859                 return status;
2860         }
2861
2862         subreq = rpc_write_send(state, state->ev, state->cli,
2863                                 prs_data_p(&state->rpc_out),
2864                                 prs_offset(&state->rpc_out));
2865         if (subreq == NULL) {
2866                 return NT_STATUS_NO_MEMORY;
2867         }
2868         subreq->async.fn = rpc_bind_auth3_write_done;
2869         subreq->async.priv = req;
2870         return NT_STATUS_OK;
2871 }
2872
2873 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2874 {
2875         struct async_req *req = talloc_get_type_abort(
2876                 subreq->async.priv, struct async_req);
2877         NTSTATUS status;
2878
2879         status = rpc_write_recv(subreq);
2880         TALLOC_FREE(subreq);
2881         if (!NT_STATUS_IS_OK(status)) {
2882                 async_req_error(req, status);
2883                 return;
2884         }
2885         async_req_done(req);
2886 }
2887
2888 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2889                                                     struct rpc_pipe_bind_state *state,
2890                                                     struct rpc_hdr_info *phdr,
2891                                                     prs_struct *reply_pdu)
2892 {
2893         DATA_BLOB server_spnego_response = data_blob_null;
2894         DATA_BLOB server_ntlm_response = data_blob_null;
2895         DATA_BLOB client_reply = data_blob_null;
2896         DATA_BLOB tmp_blob = data_blob_null;
2897         RPC_HDR_AUTH hdr_auth;
2898         struct async_req *subreq;
2899         NTSTATUS status;
2900
2901         if ((phdr->auth_len == 0)
2902             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2903                 return NT_STATUS_INVALID_PARAMETER;
2904         }
2905
2906         /* Process the returned NTLMSSP blob first. */
2907         if (!prs_set_offset(
2908                     reply_pdu,
2909                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2910                 return NT_STATUS_INVALID_PARAMETER;
2911         }
2912
2913         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2914                 return NT_STATUS_INVALID_PARAMETER;
2915         }
2916
2917         server_spnego_response = data_blob(NULL, phdr->auth_len);
2918         prs_copy_data_out((char *)server_spnego_response.data,
2919                           reply_pdu, phdr->auth_len);
2920
2921         /*
2922          * The server might give us back two challenges - tmp_blob is for the
2923          * second.
2924          */
2925         if (!spnego_parse_challenge(server_spnego_response,
2926                                     &server_ntlm_response, &tmp_blob)) {
2927                 data_blob_free(&server_spnego_response);
2928                 data_blob_free(&server_ntlm_response);
2929                 data_blob_free(&tmp_blob);
2930                 return NT_STATUS_INVALID_PARAMETER;
2931         }
2932
2933         /* We're finished with the server spnego response and the tmp_blob. */
2934         data_blob_free(&server_spnego_response);
2935         data_blob_free(&tmp_blob);
2936
2937         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2938                                 server_ntlm_response, &client_reply);
2939
2940         /* Finished with the server_ntlm response */
2941         data_blob_free(&server_ntlm_response);
2942
2943         if (!NT_STATUS_IS_OK(status)) {
2944                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2945                           "using server blob failed.\n"));
2946                 data_blob_free(&client_reply);
2947                 return status;
2948         }
2949
2950         /* SPNEGO wrap the client reply. */
2951         tmp_blob = spnego_gen_auth(client_reply);
2952         data_blob_free(&client_reply);
2953         client_reply = tmp_blob;
2954         tmp_blob = data_blob_null;
2955
2956         /* Now prepare the alter context pdu. */
2957         prs_init_empty(&state->rpc_out, state, MARSHALL);
2958
2959         status = create_rpc_alter_context(state->rpc_call_id,
2960                                           &state->cli->abstract_syntax,
2961                                           &state->cli->transfer_syntax,
2962                                           state->cli->auth->auth_level,
2963                                           &client_reply,
2964                                           &state->rpc_out);
2965         data_blob_free(&client_reply);
2966
2967         if (!NT_STATUS_IS_OK(status)) {
2968                 return status;
2969         }
2970
2971         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2972                                    &state->rpc_out, RPC_ALTCONTRESP);
2973         if (subreq == NULL) {
2974                 return NT_STATUS_NO_MEMORY;
2975         }
2976         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2977         subreq->async.priv = req;
2978         return NT_STATUS_OK;
2979 }
2980
2981 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2982 {
2983         struct async_req *req = talloc_get_type_abort(
2984                 subreq->async.priv, struct async_req);
2985         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2986                 req->private_data, struct rpc_pipe_bind_state);
2987         DATA_BLOB server_spnego_response = data_blob_null;
2988         DATA_BLOB tmp_blob = data_blob_null;
2989         prs_struct reply_pdu;
2990         struct rpc_hdr_info hdr;
2991         struct rpc_hdr_auth_info hdr_auth;
2992         NTSTATUS status;
2993
2994         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2995         TALLOC_FREE(subreq);
2996         if (!NT_STATUS_IS_OK(status)) {
2997                 async_req_error(req, status);
2998                 return;
2999         }
3000
3001         /* Get the auth blob from the reply. */
3002         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
3003                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
3004                           "unmarshall RPC_HDR.\n"));
3005                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
3006                 return;
3007         }
3008
3009         if (!prs_set_offset(
3010                     &reply_pdu,
3011                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
3012                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
3013                 return;
3014         }
3015
3016         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
3017                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
3018                 return;
3019         }
3020
3021         server_spnego_response = data_blob(NULL, hdr.auth_len);
3022         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
3023                           hdr.auth_len);
3024
3025         /* Check we got a valid auth response. */
3026         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
3027                                         OID_NTLMSSP, &tmp_blob)) {
3028                 data_blob_free(&server_spnego_response);
3029                 data_blob_free(&tmp_blob);
3030                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
3031                 return;
3032         }
3033
3034         data_blob_free(&server_spnego_response);
3035         data_blob_free(&tmp_blob);
3036
3037         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
3038                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
3039         async_req_done(req);
3040 }
3041
3042 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
3043 {
3044         return async_req_simple_recv(req);
3045 }
3046
3047 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
3048                        struct cli_pipe_auth_data *auth)
3049 {
3050         TALLOC_CTX *frame = talloc_stackframe();
3051         struct event_context *ev;
3052         struct async_req *req;
3053         NTSTATUS status = NT_STATUS_NO_MEMORY;
3054
3055         ev = event_context_init(frame);
3056         if (ev == NULL) {
3057                 goto fail;
3058         }
3059
3060         req = rpc_pipe_bind_send(frame, ev, cli, auth);
3061         if (req == NULL) {
3062                 goto fail;
3063         }
3064
3065         while (req->state < ASYNC_REQ_DONE) {
3066                 event_loop_once(ev);
3067         }
3068
3069         status = rpc_pipe_bind_recv(req);
3070  fail:
3071         TALLOC_FREE(frame);
3072         return status;
3073 }
3074
3075 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3076                                 unsigned int timeout)
3077 {
3078         return cli_set_timeout(cli->trans.np.cli, timeout);
3079 }
3080
3081 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3082 {
3083         if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3084             || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3085                 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3086                 return true;
3087         }
3088
3089         if (cli->transport_type == NCACN_NP) {
3090                 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
3091                 return true;
3092         }
3093
3094         return false;
3095 }
3096
3097 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3098 {
3099         if (p->transport_type == NCACN_NP) {
3100                 return p->trans.np.cli;
3101         }
3102         return NULL;
3103 }
3104
3105 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3106 {
3107         if (p->transport_type == NCACN_NP) {
3108                 bool ret;
3109                 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3110                 if (!ret) {
3111                         DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3112                                   "pipe %s. Error was %s\n",
3113                                   rpccli_pipe_txt(debug_ctx(), p),
3114                                   cli_errstr(p->trans.np.cli)));
3115                 }
3116
3117                 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3118                            rpccli_pipe_txt(debug_ctx(), p)));
3119
3120                 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3121                 return ret ? 0 : -1;
3122         }
3123
3124         return -1;
3125 }
3126
3127 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3128                                struct cli_pipe_auth_data **presult)
3129 {
3130         struct cli_pipe_auth_data *result;
3131
3132         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3133         if (result == NULL) {
3134                 return NT_STATUS_NO_MEMORY;
3135         }
3136
3137         result->auth_type = PIPE_AUTH_TYPE_NONE;
3138         result->auth_level = PIPE_AUTH_LEVEL_NONE;
3139
3140         result->user_name = talloc_strdup(result, "");
3141         result->domain = talloc_strdup(result, "");
3142         if ((result->user_name == NULL) || (result->domain == NULL)) {
3143                 TALLOC_FREE(result);
3144                 return NT_STATUS_NO_MEMORY;
3145         }
3146
3147         *presult = result;
3148         return NT_STATUS_OK;
3149 }
3150
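/*
 * Minimal usage sketch, kept out of the build: the anonymous auth data
 * created above is normally handed straight to rpc_pipe_bind() on an
 * already opened pipe, as rpc_pipe_get_tcp_port() and
 * cli_rpc_pipe_open_noauth() below do. "pipe_hnd" stands for a
 * hypothetical, already connected rpc_pipe_client.
 */
#if 0
static NTSTATUS example_anon_bind(struct rpc_pipe_client *pipe_hnd)
{
        struct cli_pipe_auth_data *auth;
        NTSTATUS status;

        /* PIPE_AUTH_TYPE_NONE / PIPE_AUTH_LEVEL_NONE credentials,
         * owned by the pipe. */
        status = rpccli_anon_bind_data(pipe_hnd, &auth);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        /* Plain two-leg BIND/BIND_ACK exchange, no auth trailer. */
        return rpc_pipe_bind(pipe_hnd, auth);
}
#endif
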
3151 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3152 {
3153         ntlmssp_end(&auth->a_u.ntlmssp_state);
3154         return 0;
3155 }
3156
3157 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3158                                   enum pipe_auth_type auth_type,
3159                                   enum pipe_auth_level auth_level,
3160                                   const char *domain,
3161                                   const char *username,
3162                                   const char *password,
3163                                   struct cli_pipe_auth_data **presult)
3164 {
3165         struct cli_pipe_auth_data *result;
3166         NTSTATUS status;
3167
3168         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3169         if (result == NULL) {
3170                 return NT_STATUS_NO_MEMORY;
3171         }
3172
3173         result->auth_type = auth_type;
3174         result->auth_level = auth_level;
3175
3176         result->user_name = talloc_strdup(result, username);
3177         result->domain = talloc_strdup(result, domain);
3178         if ((result->user_name == NULL) || (result->domain == NULL)) {
3179                 status = NT_STATUS_NO_MEMORY;
3180                 goto fail;
3181         }
3182
3183         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3184         if (!NT_STATUS_IS_OK(status)) {
3185                 goto fail;
3186         }
3187
3188         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3189
3190         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3191         if (!NT_STATUS_IS_OK(status)) {
3192                 goto fail;
3193         }
3194
3195         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3196         if (!NT_STATUS_IS_OK(status)) {
3197                 goto fail;
3198         }
3199
3200         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3201         if (!NT_STATUS_IS_OK(status)) {
3202                 goto fail;
3203         }
3204
3205         /*
3206          * Turn off sign+seal to allow selected auth level to turn it back on.
3207          */
3208         result->a_u.ntlmssp_state->neg_flags &=
3209                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3210
3211         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3212                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3213         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3214                 result->a_u.ntlmssp_state->neg_flags
3215                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3216         }
3217
3218         *presult = result;
3219         return NT_STATUS_OK;
3220
3221  fail:
3222         TALLOC_FREE(result);
3223         return status;
3224 }
3225
3226 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3227                                    enum pipe_auth_level auth_level,
3228                                    const uint8_t sess_key[16],
3229                                    struct cli_pipe_auth_data **presult)
3230 {
3231         struct cli_pipe_auth_data *result;
3232
3233         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3234         if (result == NULL) {
3235                 return NT_STATUS_NO_MEMORY;
3236         }
3237
3238         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3239         result->auth_level = auth_level;
3240
3241         result->user_name = talloc_strdup(result, "");
3242         result->domain = talloc_strdup(result, domain);
3243         if ((result->user_name == NULL) || (result->domain == NULL)) {
3244                 goto fail;
3245         }
3246
3247         result->a_u.schannel_auth = talloc(result,
3248                                            struct schannel_auth_struct);
3249         if (result->a_u.schannel_auth == NULL) {
3250                 goto fail;
3251         }
3252
3253         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3254                sizeof(result->a_u.schannel_auth->sess_key));
3255         result->a_u.schannel_auth->seq_num = 0;
3256
3257         *presult = result;
3258         return NT_STATUS_OK;
3259
3260  fail:
3261         TALLOC_FREE(result);
3262         return NT_STATUS_NO_MEMORY;
3263 }
3264
3265 #ifdef HAVE_KRB5
3266 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3267 {
3268         data_blob_free(&auth->session_key);
3269         return 0;
3270 }
3271 #endif
3272
3273 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3274                                    enum pipe_auth_level auth_level,
3275                                    const char *service_princ,
3276                                    const char *username,
3277                                    const char *password,
3278                                    struct cli_pipe_auth_data **presult)
3279 {
3280 #ifdef HAVE_KRB5
3281         struct cli_pipe_auth_data *result;
3282
3283         if ((username != NULL) && (password != NULL)) {
3284                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3285                 if (ret != 0) {
3286                         return NT_STATUS_ACCESS_DENIED;
3287                 }
3288         }
3289
3290         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3291         if (result == NULL) {
3292                 return NT_STATUS_NO_MEMORY;
3293         }
3294
3295         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3296         result->auth_level = auth_level;
3297
3298         /*
3299          * Username / domain need fixing!
3300          */
3301         result->user_name = talloc_strdup(result, "");
3302         result->domain = talloc_strdup(result, "");
3303         if ((result->user_name == NULL) || (result->domain == NULL)) {
3304                 goto fail;
3305         }
3306
3307         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3308                 result, struct kerberos_auth_struct);
3309         if (result->a_u.kerberos_auth == NULL) {
3310                 goto fail;
3311         }
3312         talloc_set_destructor(result->a_u.kerberos_auth,
3313                               cli_auth_kerberos_data_destructor);
3314
3315         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3316                 result, service_princ);
3317         if (result->a_u.kerberos_auth->service_principal == NULL) {
3318                 goto fail;
3319         }
3320
3321         *presult = result;
3322         return NT_STATUS_OK;
3323
3324  fail:
3325         TALLOC_FREE(result);
3326         return NT_STATUS_NO_MEMORY;
3327 #else
3328         return NT_STATUS_NOT_SUPPORTED;
3329 #endif
3330 }
3331
3332 static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
3333 {
3334         close(p->trans.sock.fd);
3335         return 0;
3336 }
3337
3338 /**
3339  * Create an rpc pipe client struct, connecting to a tcp port.
3340  */
3341 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3342                                        uint16_t port,
3343                                        const struct ndr_syntax_id *abstract_syntax,
3344                                        struct rpc_pipe_client **presult)
3345 {
3346         struct rpc_pipe_client *result;
3347         struct sockaddr_storage addr;
3348         NTSTATUS status;
3349
3350         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3351         if (result == NULL) {
3352                 return NT_STATUS_NO_MEMORY;
3353         }
3354
3355         result->transport_type = NCACN_IP_TCP;
3356
3357         result->abstract_syntax = *abstract_syntax;
3358         result->transfer_syntax = ndr_transfer_syntax;
3359
3360         result->desthost = talloc_strdup(result, host);
3361         result->srv_name_slash = talloc_asprintf_strupper_m(
3362                 result, "\\\\%s", result->desthost);
3363         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3364                 status = NT_STATUS_NO_MEMORY;
3365                 goto fail;
3366         }
3367
3368         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3369         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3370
3371         if (!resolve_name(host, &addr, 0)) {
3372                 status = NT_STATUS_NOT_FOUND;
3373                 goto fail;
3374         }
3375
3376         status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3377         if (!NT_STATUS_IS_OK(status)) {
3378                 goto fail;
3379         }
3380
3381         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3382
3383         *presult = result;
3384         return NT_STATUS_OK;
3385
3386  fail:
3387         TALLOC_FREE(result);
3388         return status;
3389 }
3390
3391 /**
3392  * Determine the tcp port on which a dcerpc interface is listening
3393  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3394  * target host.
3395  */
3396 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3397                                       const struct ndr_syntax_id *abstract_syntax,
3398                                       uint16_t *pport)
3399 {
3400         NTSTATUS status;
3401         struct rpc_pipe_client *epm_pipe = NULL;
3402         struct cli_pipe_auth_data *auth = NULL;
3403         struct dcerpc_binding *map_binding = NULL;
3404         struct dcerpc_binding *res_binding = NULL;
3405         struct epm_twr_t *map_tower = NULL;
3406         struct epm_twr_t *res_towers = NULL;
3407         struct policy_handle *entry_handle = NULL;
3408         uint32_t num_towers = 0;
3409         uint32_t max_towers = 1;
3410         struct epm_twr_p_t towers;
3411         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3412
3413         if (pport == NULL) {
3414                 status = NT_STATUS_INVALID_PARAMETER;
3415                 goto done;
3416         }
3417
3418         /* open the connection to the endpoint mapper */
3419         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3420                                         &ndr_table_epmapper.syntax_id,
3421                                         &epm_pipe);
3422
3423         if (!NT_STATUS_IS_OK(status)) {
3424                 goto done;
3425         }
3426
3427         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3428         if (!NT_STATUS_IS_OK(status)) {
3429                 goto done;
3430         }
3431
3432         status = rpc_pipe_bind(epm_pipe, auth);
3433         if (!NT_STATUS_IS_OK(status)) {
3434                 goto done;
3435         }
3436
3437         /* create tower for asking the epmapper */
3438
3439         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3440         if (map_binding == NULL) {
3441                 status = NT_STATUS_NO_MEMORY;
3442                 goto done;
3443         }
3444
3445         map_binding->transport = NCACN_IP_TCP;
3446         map_binding->object = *abstract_syntax;
3447         map_binding->host = host; /* needed? */
3448         map_binding->endpoint = "0"; /* correct? needed? */
3449
3450         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3451         if (map_tower == NULL) {
3452                 status = NT_STATUS_NO_MEMORY;
3453                 goto done;
3454         }
3455
3456         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3457                                             &(map_tower->tower));
3458         if (!NT_STATUS_IS_OK(status)) {
3459                 goto done;
3460         }
3461
3462         /* allocate further parameters for the epm_Map call */
3463
3464         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3465         if (res_towers == NULL) {
3466                 status = NT_STATUS_NO_MEMORY;
3467                 goto done;
3468         }
3469         towers.twr = res_towers;
3470
3471         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3472         if (entry_handle == NULL) {
3473                 status = NT_STATUS_NO_MEMORY;
3474                 goto done;
3475         }
3476
3477         /* ask the endpoint mapper for the port */
3478
3479         status = rpccli_epm_Map(epm_pipe,
3480                                 tmp_ctx,
3481                                 CONST_DISCARD(struct GUID *,
3482                                               &(abstract_syntax->uuid)),
3483                                 map_tower,
3484                                 entry_handle,
3485                                 max_towers,
3486                                 &num_towers,
3487                                 &towers);
3488
3489         if (!NT_STATUS_IS_OK(status)) {
3490                 goto done;
3491         }
3492
3493         if (num_towers != 1) {
3494                 status = NT_STATUS_UNSUCCESSFUL;
3495                 goto done;
3496         }
3497
3498         /* extract the port from the answer */
3499
3500         status = dcerpc_binding_from_tower(tmp_ctx,
3501                                            &(towers.twr->tower),
3502                                            &res_binding);
3503         if (!NT_STATUS_IS_OK(status)) {
3504                 goto done;
3505         }
3506
3507         /* are further checks here necessary? */
3508         if (res_binding->transport != NCACN_IP_TCP) {
3509                 status = NT_STATUS_UNSUCCESSFUL;
3510                 goto done;
3511         }
3512
3513         *pport = (uint16_t)atoi(res_binding->endpoint);
3514
3515 done:
3516         TALLOC_FREE(tmp_ctx);
3517         return status;
3518 }
3519
3520 /**
3521  * Create an rpc pipe client struct, connecting to a host via tcp.
3522  * The port is determined by asking the endpoint mapper on the given
3523  * host.
3524  */
3525 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3526                            const struct ndr_syntax_id *abstract_syntax,
3527                            struct rpc_pipe_client **presult)
3528 {
3529         NTSTATUS status;
3530         uint16_t port = 0;
3531
3532         *presult = NULL;
3533
3534         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3535         if (!NT_STATUS_IS_OK(status)) {
3536                 goto done;
3537         }
3538
3539         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3540                                         abstract_syntax, presult);
3541
3542 done:
3543         return status;
3544 }
3545
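/*
 * Minimal usage sketch, kept out of the build: open an interface over
 * ncacn_ip_tcp and bind it anonymously. drsuapi is used as the example
 * because cli_rpc_pipe_open() below routes it over TCP as well; a real
 * drsuapi connection would normally use an authenticated bind, and
 * "host" is a placeholder supplied by the caller.
 */
#if 0
static NTSTATUS example_open_drsuapi_tcp(TALLOC_CTX *mem_ctx,
                                         const char *host,
                                         struct rpc_pipe_client **presult)
{
        struct rpc_pipe_client *pipe_hnd = NULL;
        struct cli_pipe_auth_data *auth = NULL;
        NTSTATUS status;

        /* Asks the endpoint mapper on "host" for the port, then connects. */
        status = rpc_pipe_open_tcp(mem_ctx, host,
                                   &ndr_table_drsuapi.syntax_id, &pipe_hnd);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        status = rpccli_anon_bind_data(pipe_hnd, &auth);
        if (NT_STATUS_IS_OK(status)) {
                status = rpc_pipe_bind(pipe_hnd, auth);
        }
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(pipe_hnd);
                return status;
        }

        *presult = pipe_hnd;
        return NT_STATUS_OK;
}
#endif
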
3546 /********************************************************************
3547  Create an rpc pipe client struct, connecting to a unix domain socket
3548  ********************************************************************/
3549 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3550                                const struct ndr_syntax_id *abstract_syntax,
3551                                struct rpc_pipe_client **presult)
3552 {
3553         struct rpc_pipe_client *result;
3554         struct sockaddr_un addr;
3555         NTSTATUS status;
3556
3557         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3558         if (result == NULL) {
3559                 return NT_STATUS_NO_MEMORY;
3560         }
3561
3562         result->transport_type = NCACN_UNIX_STREAM;
3563
3564         result->abstract_syntax = *abstract_syntax;
3565         result->transfer_syntax = ndr_transfer_syntax;
3566
3567         result->desthost = talloc_get_myname(result);
3568         result->srv_name_slash = talloc_asprintf_strupper_m(
3569                 result, "\\\\%s", result->desthost);
3570         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3571                 status = NT_STATUS_NO_MEMORY;
3572                 goto fail;
3573         }
3574
3575         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3576         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3577
3578         result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3579         if (result->trans.sock.fd == -1) {
3580                 status = map_nt_error_from_unix(errno);
3581                 goto fail;
3582         }
3583
3584         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3585
3586         ZERO_STRUCT(addr);
3587         addr.sun_family = AF_UNIX;
3588         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1);
3589
3590         if (sys_connect(result->trans.sock.fd,
3591                         (struct sockaddr *)&addr) == -1) {
3592                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3593                           strerror(errno)));
3594                 status = map_nt_error_from_unix(errno);
3595                 goto fail;
3596         }
3597
3598         *presult = result;
3599         return NT_STATUS_OK;
3600
3601  fail:
3602         TALLOC_FREE(result);
3603         return status;
3604 }
3605
3606
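/*
 * Minimal usage sketch, kept out of the build: connect to a local rpc
 * server over a unix domain socket. The socket path is a made-up
 * example value.
 */
#if 0
static NTSTATUS example_open_ncalrpc(TALLOC_CTX *mem_ctx,
                                     struct rpc_pipe_client **presult)
{
        NTSTATUS status;

        status = rpc_pipe_open_ncalrpc(mem_ctx, "/tmp/rpc_example.sock",
                                       &ndr_table_epmapper.syntax_id,
                                       presult);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        /* Binding then proceeds exactly as for the other transports,
         * e.g. rpccli_anon_bind_data() followed by rpc_pipe_bind(). */
        return NT_STATUS_OK;
}
#endif
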
3607 /****************************************************************************
3608  Open a named pipe over SMB to a remote server.
3609  *
3610  * CAVEAT CALLER OF THIS FUNCTION:
3611  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3612  *    so be sure that this function is called AFTER any structure (vs pointer)
3613  *    assignment of the cli.  In particular, libsmbclient does structure
3614  *    assignments of cli, which invalidates the data in the returned
3615  *    rpc_pipe_client if this function is called before the structure assignment
3616  *    of cli.
3617  * 
3618  ****************************************************************************/
3619
3620 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3621                                  const struct ndr_syntax_id *abstract_syntax,
3622                                  struct rpc_pipe_client **presult)
3623 {
3624         struct rpc_pipe_client *result;
3625         int fnum;
3626
3627         /* sanity check to protect against crashes */
3628
3629         if ( !cli ) {
3630                 return NT_STATUS_INVALID_HANDLE;
3631         }
3632
3633         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3634         if (result == NULL) {
3635                 return NT_STATUS_NO_MEMORY;
3636         }
3637
3638         result->transport_type = NCACN_NP;
3639
3640         result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
3641                 result, cli, abstract_syntax);
3642         if (result->trans.np.pipe_name == NULL) {
3643                 DEBUG(1, ("Could not find pipe for interface\n"));
3644                 TALLOC_FREE(result);
3645                 return NT_STATUS_INVALID_PARAMETER;
3646         }
3647
3648         result->trans.np.cli = cli;
3649         result->abstract_syntax = *abstract_syntax;
3650         result->transfer_syntax = ndr_transfer_syntax;
3651         result->desthost = talloc_strdup(result, cli->desthost);
3652         result->srv_name_slash = talloc_asprintf_strupper_m(
3653                 result, "\\\\%s", result->desthost);
3654
3655         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3656         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3657
3658         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3659                 TALLOC_FREE(result);
3660                 return NT_STATUS_NO_MEMORY;
3661         }
3662
3663         fnum = cli_nt_create(cli, result->trans.np.pipe_name,
3664                              DESIRED_ACCESS_PIPE);
3665         if (fnum == -1) {
3666                 DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
3667                          "to machine %s.  Error was %s\n",
3668                          result->trans.np.pipe_name, cli->desthost,
3669                          cli_errstr(cli)));
3670                 TALLOC_FREE(result);
3671                 return cli_get_nt_error(cli);
3672         }
3673
3674         result->trans.np.fnum = fnum;
3675
3676         DLIST_ADD(cli->pipe_list, result);
3677         talloc_set_destructor(result, rpc_pipe_destructor);
3678
3679         *presult = result;
3680         return NT_STATUS_OK;
3681 }
3682
3683 /****************************************************************************
3684  Open a pipe to a remote server.
3685  ****************************************************************************/
3686
3687 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3688                                   const struct ndr_syntax_id *interface,
3689                                   struct rpc_pipe_client **presult)
3690 {
3691         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3692                 /*
3693                  * We should have a better way to figure out this drsuapi
3694                  * speciality...
3695                  */
3696                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3697                                          presult);
3698         }
3699
3700         return rpc_pipe_open_np(cli, interface, presult);
3701 }
3702
3703 /****************************************************************************
3704  Open a named pipe to an SMB server and bind anonymously.
3705  ****************************************************************************/
3706
3707 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3708                                   const struct ndr_syntax_id *interface,
3709                                   struct rpc_pipe_client **presult)
3710 {
3711         struct rpc_pipe_client *result;
3712         struct cli_pipe_auth_data *auth;
3713         NTSTATUS status;
3714
3715         status = cli_rpc_pipe_open(cli, interface, &result);
3716         if (!NT_STATUS_IS_OK(status)) {
3717                 return status;
3718         }
3719
3720         status = rpccli_anon_bind_data(result, &auth);
3721         if (!NT_STATUS_IS_OK(status)) {
3722                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3723                           nt_errstr(status)));
3724                 TALLOC_FREE(result);
3725                 return status;
3726         }
3727
3728         /*
3729          * This is a bit of an abstraction violation due to the fact that an
3730          * anonymous bind on an authenticated SMB inherits the user/domain
3731          * from the enclosing SMB creds
3732          */
3733
3734         TALLOC_FREE(auth->user_name);
3735         TALLOC_FREE(auth->domain);
3736
3737         auth->user_name = talloc_strdup(auth, cli->user_name);
3738         auth->domain = talloc_strdup(auth, cli->domain);
3739         auth->user_session_key = data_blob_talloc(auth,
3740                 cli->user_session_key.data,
3741                 cli->user_session_key.length);
3742
3743         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3744                 TALLOC_FREE(result);
3745                 return NT_STATUS_NO_MEMORY;
3746         }
3747
3748         status = rpc_pipe_bind(result, auth);
3749         if (!NT_STATUS_IS_OK(status)) {
3750                 int lvl = 0;
3751                 if (ndr_syntax_id_equal(interface,
3752                                         &ndr_table_dssetup.syntax_id)) {
3753                         /* non AD domains just don't have this pipe, avoid
3754                          * level 0 statement in that case - gd */
3755                         lvl = 3;
3756                 }
3757                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3758                             "%s failed with error %s\n",
3759                             cli_get_pipe_name_from_iface(debug_ctx(), cli,
3760                                                          interface),
3761                             nt_errstr(status) ));
3762                 TALLOC_FREE(result);
3763                 return status;
3764         }
3765
3766         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3767                   "%s and bound anonymously.\n", result->trans.np.pipe_name,
3768                   cli->desthost ));
3769
3770         *presult = result;
3771         return NT_STATUS_OK;
3772 }
3773
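/*
 * Minimal usage sketch, kept out of the build: typical caller-side use of
 * cli_rpc_pipe_open_noauth() on an existing SMB connection, here for the
 * SAMR interface.
 */
#if 0
static NTSTATUS example_open_samr_anon(struct cli_state *cli)
{
        struct rpc_pipe_client *samr_pipe = NULL;
        NTSTATUS status;

        status = cli_rpc_pipe_open_noauth(cli, &ndr_table_samr.syntax_id,
                                          &samr_pipe);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(1, ("samr pipe open failed: %s\n", nt_errstr(status)));
                return status;
        }

        /* ... issue rpccli_samr_* calls on samr_pipe here ... */

        /* Freeing the pipe closes the underlying SMB pipe handle via its
         * talloc destructor. */
        TALLOC_FREE(samr_pipe);
        return NT_STATUS_OK;
}
#endif
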
3774 /****************************************************************************
3775  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3776  ****************************************************************************/
3777
3778 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3779                                                    const struct ndr_syntax_id *interface,
3780                                                    enum pipe_auth_type auth_type,
3781                                                    enum pipe_auth_level auth_level,
3782                                                    const char *domain,
3783                                                    const char *username,
3784                                                    const char *password,
3785                                                    struct rpc_pipe_client **presult)
3786 {
3787         struct rpc_pipe_client *result;
3788         struct cli_pipe_auth_data *auth;
3789         NTSTATUS status;
3790
3791         status = cli_rpc_pipe_open(cli, interface, &result);
3792         if (!NT_STATUS_IS_OK(status)) {
3793                 return status;
3794         }
3795
3796         status = rpccli_ntlmssp_bind_data(
3797                 result, auth_type, auth_level, domain, username,
3798                 cli->pwd.null_pwd ? NULL : password, &auth);
3799         if (!NT_STATUS_IS_OK(status)) {
3800                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3801                           nt_errstr(status)));
3802                 goto err;
3803         }
3804
3805         status = rpc_pipe_bind(result, auth);
3806         if (!NT_STATUS_IS_OK(status)) {
3807                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3808                         nt_errstr(status) ));
3809                 goto err;
3810         }
3811
3812         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3813                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3814                 result->trans.np.pipe_name, cli->desthost,
3815                 domain, username ));
3816
3817         *presult = result;
3818         return NT_STATUS_OK;
3819
3820   err:
3821
3822         TALLOC_FREE(result);
3823         return status;
3824 }
3825
3826 /****************************************************************************
3827  External interface.
3828  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3829  ****************************************************************************/
3830
3831 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3832                                    const struct ndr_syntax_id *interface,
3833                                    enum pipe_auth_level auth_level,
3834                                    const char *domain,
3835                                    const char *username,
3836                                    const char *password,
3837                                    struct rpc_pipe_client **presult)
3838 {
3839         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3840                                                 interface,
3841                                                 PIPE_AUTH_TYPE_NTLMSSP,
3842                                                 auth_level,
3843                                                 domain,
3844                                                 username,
3845                                                 password,
3846                                                 presult);
3847 }
3848
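/*
 * Minimal usage sketch, kept out of the build: an authenticated, sealed
 * bind to the LSA pipe via the external interface above. Domain, user
 * and password are placeholders supplied by the caller.
 */
#if 0
static NTSTATUS example_open_lsa_ntlmssp(struct cli_state *cli,
                                         const char *domain,
                                         const char *username,
                                         const char *password,
                                         struct rpc_pipe_client **presult)
{
        /* PIPE_AUTH_LEVEL_PRIVACY selects NTLMSSP sign and seal. */
        return cli_rpc_pipe_open_ntlmssp(cli,
                                         &ndr_table_lsarpc.syntax_id,
                                         PIPE_AUTH_LEVEL_PRIVACY,
                                         domain,
                                         username,
                                         password,
                                         presult);
}
#endif
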
3849 /****************************************************************************
3850  External interface.
3851  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3852  ****************************************************************************/
3853
3854 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3855                                           const struct ndr_syntax_id *interface,
3856                                           enum pipe_auth_level auth_level,
3857                                           const char *domain,
3858                                           const char *username,
3859                                           const char *password,
3860                                           struct rpc_pipe_client **presult)
3861 {
3862         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3863                                                 interface,
3864                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3865                                                 auth_level,
3866                                                 domain,
3867                                                 username,
3868                                                 password,
3869                                                 presult);
3870 }
3871
3872 /****************************************************************************
3873   Get the schannel session key out of an already opened netlogon pipe.
3874  ****************************************************************************/
3875 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3876                                                 struct cli_state *cli,
3877                                                 const char *domain,
3878                                                 uint32 *pneg_flags)
3879 {
3880         uint32 sec_chan_type = 0;
3881         unsigned char machine_pwd[16];
3882         const char *machine_account;
3883         NTSTATUS status;
3884
3885         /* Get the machine account credentials from secrets.tdb. */
3886         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3887                                &sec_chan_type))
3888         {
3889                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3890                         "trust account password for domain '%s'\n",
3891                         domain));
3892                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3893         }
3894
3895         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3896                                         cli->desthost, /* server name */
3897                                         domain,        /* domain */
3898                                         global_myname(), /* client name */
3899                                         machine_account, /* machine account name */
3900                                         machine_pwd,
3901                                         sec_chan_type,
3902                                         pneg_flags);
3903
3904         if (!NT_STATUS_IS_OK(status)) {
3905                 DEBUG(3, ("get_schannel_session_key_common: "
3906                           "rpccli_netlogon_setup_creds failed with result %s "
3907                           "to server %s, domain %s, machine account %s.\n",
3908                           nt_errstr(status), cli->desthost, domain,
3909                           machine_account ));
3910                 return status;
3911         }
3912
3913         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3914                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3915                         cli->desthost));
3916                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3917         }
3918
3919         return NT_STATUS_OK;
3920 }
3921
3922 /****************************************************************************
3923  Open a netlogon pipe and get the schannel session key.
3924  Now exposed to external callers.
3925  ****************************************************************************/
3926
3927
3928 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3929                                   const char *domain,
3930                                   uint32 *pneg_flags,
3931                                   struct rpc_pipe_client **presult)
3932 {
3933         struct rpc_pipe_client *netlogon_pipe = NULL;
3934         NTSTATUS status;
3935
3936         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3937                                           &netlogon_pipe);
3938         if (!NT_STATUS_IS_OK(status)) {
3939                 return status;
3940         }
3941
3942         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3943                                                  pneg_flags);
3944         if (!NT_STATUS_IS_OK(status)) {
3945                 TALLOC_FREE(netlogon_pipe);
3946                 return status;
3947         }
3948
3949         *presult = netlogon_pipe;
3950         return NT_STATUS_OK;
3951 }
3952
3953 /****************************************************************************
3954  External interface.
3955  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3956  using session_key. sign and seal.
3957  ****************************************************************************/
3958
3959 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3960                                              const struct ndr_syntax_id *interface,
3961                                              enum pipe_auth_level auth_level,
3962                                              const char *domain,
3963                                              const struct dcinfo *pdc,
3964                                              struct rpc_pipe_client **presult)
3965 {
3966         struct rpc_pipe_client *result;
3967         struct cli_pipe_auth_data *auth;
3968         NTSTATUS status;
3969
3970         status = cli_rpc_pipe_open(cli, interface, &result);
3971         if (!NT_STATUS_IS_OK(status)) {
3972                 return status;
3973         }
3974
3975         status = rpccli_schannel_bind_data(result, domain, auth_level,
3976                                            pdc->sess_key, &auth);
3977         if (!NT_STATUS_IS_OK(status)) {
3978                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3979                           nt_errstr(status)));
3980                 TALLOC_FREE(result);
3981                 return status;
3982         }
3983
3984         status = rpc_pipe_bind(result, auth);
3985         if (!NT_STATUS_IS_OK(status)) {
3986                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3987                           "cli_rpc_pipe_bind failed with error %s\n",
3988                           nt_errstr(status) ));
3989                 TALLOC_FREE(result);
3990                 return status;
3991         }
3992
3993         /*
3994          * The credentials on a new netlogon pipe are the ones we are passed
3995          * in - copy them over.
3996          */
3997         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3998         if (result->dc == NULL) {
3999                 DEBUG(0, ("talloc failed\n"));
4000                 TALLOC_FREE(result);
4001                 return NT_STATUS_NO_MEMORY;
4002         }
4003
4004         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
4005                 "for domain %s "
4006                 "and bound using schannel.\n",
4007                 result->trans.np.pipe_name, cli->desthost, domain ));
4008
4009         *presult = result;
4010         return NT_STATUS_OK;
4011 }
4012
4013 /****************************************************************************
4014  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4015  Fetch the session key ourselves using a temporary netlogon pipe. This
4016  version uses an ntlmssp auth bound netlogon pipe to get the key.
4017  ****************************************************************************/
4018
4019 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
4020                                                       const char *domain,
4021                                                       const char *username,
4022                                                       const char *password,
4023                                                       uint32 *pneg_flags,
4024                                                       struct rpc_pipe_client **presult)
4025 {
4026         struct rpc_pipe_client *netlogon_pipe = NULL;
4027         NTSTATUS status;
4028
4029         status = cli_rpc_pipe_open_spnego_ntlmssp(
4030                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
4031                 domain, username, password, &netlogon_pipe);
4032         if (!NT_STATUS_IS_OK(status)) {
4033                 return status;
4034         }
4035
4036         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
4037                                                  pneg_flags);
4038         if (!NT_STATUS_IS_OK(status)) {
4039                 TALLOC_FREE(netlogon_pipe);
4040                 return status;
4041         }
4042
4043         *presult = netlogon_pipe;
4044         return NT_STATUS_OK;
4045 }
4046
4047 /****************************************************************************
4048  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4049  Fetch the session key ourselves using a temporary netlogon pipe. This version
4050  uses an NTLMSSP bind on the netlogon pipe to get the session key.
4051  ****************************************************************************/
4052
4053 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4054                                                  const struct ndr_syntax_id *interface,
4055                                                  enum pipe_auth_level auth_level,
4056                                                  const char *domain,
4057                                                  const char *username,
4058                                                  const char *password,
4059                                                  struct rpc_pipe_client **presult)
4060 {
4061         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4062         struct rpc_pipe_client *netlogon_pipe = NULL;
4063         struct rpc_pipe_client *result = NULL;
4064         NTSTATUS status;
4065
4066         status = get_schannel_session_key_auth_ntlmssp(
4067                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4068         if (!NT_STATUS_IS_OK(status)) {
4069                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4070                         "key from server %s for domain %s.\n",
4071                         cli->desthost, domain ));
4072                 return status;
4073         }
4074
4075         status = cli_rpc_pipe_open_schannel_with_key(
4076                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4077                 &result);
4078
4079         /* Now that we've bound using the session key, we can close the netlogon pipe. */
4080         TALLOC_FREE(netlogon_pipe);
4081
4082         if (NT_STATUS_IS_OK(status)) {
4083                 *presult = result;
4084         }
4085         return status;
4086 }
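
/*
 * Illustrative usage sketch (not part of the original code): a caller
 * holding plaintext credentials can use this wrapper and let it manage
 * the temporary NTLMSSP-authenticated netlogon pipe internally. The
 * variable names below are hypothetical.
 *
 *      struct rpc_pipe_client *pipe_hnd = NULL;
 *      NTSTATUS status;
 *
 *      status = cli_rpc_pipe_open_ntlmssp_auth_schannel(
 *              cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
 *              domain, username, password, &pipe_hnd);
 */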
4087
4088 /****************************************************************************
4089  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4090  Fetch the session key ourselves using a temporary netlogon pipe.
4091  ****************************************************************************/
4092
4093 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4094                                     const struct ndr_syntax_id *interface,
4095                                     enum pipe_auth_level auth_level,
4096                                     const char *domain,
4097                                     struct rpc_pipe_client **presult)
4098 {
4099         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4100         struct rpc_pipe_client *netlogon_pipe = NULL;
4101         struct rpc_pipe_client *result = NULL;
4102         NTSTATUS status;
4103
4104         status = get_schannel_session_key(cli, domain, &neg_flags,
4105                                           &netlogon_pipe);
4106         if (!NT_STATUS_IS_OK(status)) {
4107                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4108                         "key from server %s for domain %s.\n",
4109                         cli->desthost, domain ));
4110                 return status;
4111         }
4112
4113         status = cli_rpc_pipe_open_schannel_with_key(
4114                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4115                 &result);
4116
4117         /* Now that we've bound using the session key, we can close the netlogon pipe. */
4118         TALLOC_FREE(netlogon_pipe);
4119
4120         if (NT_STATUS_IS_OK(status)) {
4121                 *presult = result;
4122         }
4123
4124         return status;
4125 }
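
/*
 * Illustrative usage sketch (not part of the original code): unlike
 * cli_rpc_pipe_open_ntlmssp_auth_schannel() above, no explicit
 * username/password is passed here; the schannel session key is fetched
 * internally via get_schannel_session_key(). The variable names below
 * are hypothetical.
 *
 *      struct rpc_pipe_client *pipe_hnd = NULL;
 *      NTSTATUS status;
 *
 *      status = cli_rpc_pipe_open_schannel(
 *              cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
 *              domain, &pipe_hnd);
 */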
4126
4127 /****************************************************************************
4128  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4129  The idea is that this can be called with service_princ, username and password
4130  all NULL so long as the caller already holds a TGT.
4131  ****************************************************************************/
4132
4133 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4134                                 const struct ndr_syntax_id *interface,
4135                                 enum pipe_auth_level auth_level,
4136                                 const char *service_princ,
4137                                 const char *username,
4138                                 const char *password,
4139                                 struct rpc_pipe_client **presult)
4140 {
4141 #ifdef HAVE_KRB5
4142         struct rpc_pipe_client *result;
4143         struct cli_pipe_auth_data *auth;
4144         NTSTATUS status;
4145
4146         status = cli_rpc_pipe_open(cli, interface, &result);
4147         if (!NT_STATUS_IS_OK(status)) {
4148                 return status;
4149         }
4150
4151         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4152                                            username, password, &auth);
4153         if (!NT_STATUS_IS_OK(status)) {
4154                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4155                           nt_errstr(status)));
4156                 TALLOC_FREE(result);
4157                 return status;
4158         }
4159
4160         status = rpc_pipe_bind(result, auth);
4161         if (!NT_STATUS_IS_OK(status)) {
4162                 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4163                           "with error %s\n", nt_errstr(status)));
4164                 TALLOC_FREE(result);
4165                 return status;
4166         }
4167
4168         *presult = result;
4169         return NT_STATUS_OK;
4170 #else
4171         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4172         return NT_STATUS_NOT_IMPLEMENTED;
4173 #endif
4174 }
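
/*
 * Illustrative usage sketch (not part of the original code): as noted in
 * the comment above, a caller that already holds a valid TGT in its
 * credential cache can pass NULL for service_princ, username and
 * password. The interface chosen here is just an example; variable names
 * are hypothetical.
 *
 *      struct rpc_pipe_client *pipe_hnd = NULL;
 *      NTSTATUS status;
 *
 *      status = cli_rpc_pipe_open_krb5(
 *              cli, &ndr_table_lsarpc.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
 *              NULL, NULL, NULL, &pipe_hnd);
 */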
4175
4176 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4177                              struct rpc_pipe_client *cli,
4178                              DATA_BLOB *session_key)
4179 {
4180         if (!session_key || !cli) {
4181                 return NT_STATUS_INVALID_PARAMETER;
4182         }
4183
4184         if (!cli->auth) {
4185                 return NT_STATUS_INVALID_PARAMETER;
4186         }
4187
4188         switch (cli->auth->auth_type) {
4189                 case PIPE_AUTH_TYPE_SCHANNEL:
4190                         *session_key = data_blob_talloc(mem_ctx,
4191                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4192                         break;
4193                 case PIPE_AUTH_TYPE_NTLMSSP:
4194                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4195                         *session_key = data_blob_talloc(mem_ctx,
4196                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4197                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4198                         break;
4199                 case PIPE_AUTH_TYPE_KRB5:
4200                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4201                         *session_key = data_blob_talloc(mem_ctx,
4202                                 cli->auth->a_u.kerberos_auth->session_key.data,
4203                                 cli->auth->a_u.kerberos_auth->session_key.length);
4204                         break;
4205                 case PIPE_AUTH_TYPE_NONE:
4206                         *session_key = data_blob_talloc(mem_ctx,
4207                                 cli->auth->user_session_key.data,
4208                                 cli->auth->user_session_key.length);
4209                         break;
4210                 default:
4211                         return NT_STATUS_NO_USER_SESSION_KEY;
4212         }
4213
4214         return NT_STATUS_OK;
4215 }
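
/*
 * Illustrative usage sketch (not part of the original code): callers
 * typically fetch the session key so they can encrypt secrets that the
 * server must decrypt with the same key (e.g. SAMR password buffers).
 * A minimal sketch, assuming tmp_ctx is a valid talloc context and
 * pipe_hnd an open, authenticated pipe:
 *
 *      DATA_BLOB session_key = data_blob_null;
 *      NTSTATUS status;
 *
 *      status = cli_get_session_key(tmp_ctx, pipe_hnd, &session_key);
 *      if (!NT_STATUS_IS_OK(status)) {
 *              return status;
 *      }
 *      ...use session_key.data / session_key.length, then
 *      data_blob_free(&session_key) when finished...
 */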