Convert cli_api_pipe to tevent_req
kai/samba.git: source3/rpc_client/cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
54 static const struct pipe_id_info {
55         /* the names appear not to matter: the syntaxes _do_ matter */
56
57         const char *client_pipe;
58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
60 {
61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
68         { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
77         { NULL, NULL }
78 };
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
85 {
86         char *guid_str;
87         const char *result;
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
102         if (guid_str == NULL) {
103                 return NULL;
104         }
105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106                                  (int)interface->if_version);
107         TALLOC_FREE(guid_str);
108
109         if (result == NULL) {
110                 return "PIPE";
111         }
112         return result;
113 }
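/*
 * Illustrative usage, not part of the original file: looking up the short
 * pipe name for a known interface. The "\\PIPE" prefix of the table entry
 * is skipped via &client_pipe[5] above, so a known interface yields e.g.
 * "\\samr"; an unknown one falls back to a talloc'ed
 * "Interface <uuid>.<version>" string.
 */
#if 0
        const char *name = get_pipe_name_from_iface(&ndr_table_samr.syntax_id);
        /* name now points at "\\samr" */
#endif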
114
115 /********************************************************************
116  Map internal value to wire value.
117  ********************************************************************/
118
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
120 {
121         switch (auth_type) {
122
123         case PIPE_AUTH_TYPE_NONE:
124                 return RPC_ANONYMOUS_AUTH_TYPE;
125
126         case PIPE_AUTH_TYPE_NTLMSSP:
127                 return RPC_NTLMSSP_AUTH_TYPE;
128
129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131                 return RPC_SPNEGO_AUTH_TYPE;
132
133         case PIPE_AUTH_TYPE_SCHANNEL:
134                 return RPC_SCHANNEL_AUTH_TYPE;
135
136         case PIPE_AUTH_TYPE_KRB5:
137                 return RPC_KRB5_AUTH_TYPE;
138
139         default:
140                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
141                         "auth type %u\n",
142                         (unsigned int)auth_type ));
143                 break;
144         }
145         return -1;
146 }
147
148 /********************************************************************
149  Pipe description for a DEBUG
150  ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152                                    struct rpc_pipe_client *cli)
153 {
154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155         if (result == NULL) {
156                 return "pipe";
157         }
158         return result;
159 }
160
161 /********************************************************************
162  Rpc pipe call id.
163  ********************************************************************/
164
165 static uint32 get_rpc_call_id(void)
166 {
167         static uint32 call_id = 0;
168         return ++call_id;
169 }
170
171 /*
172  * Realloc pdu to have at least "size" bytes
173  */
174
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
176 {
177         size_t extra_size;
178
179         if (prs_data_size(pdu) >= size) {
180                 return true;
181         }
182
183         extra_size = size - prs_data_size(pdu);
184
185         if (!prs_force_grow(pdu, extra_size)) {
186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187                           "%d bytes.\n", (int)extra_size));
188                 return false;
189         }
190
191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192                   (int)extra_size, prs_data_size(pdu)));
193         return true;
194 }
195
196
197 /*******************************************************************
198  Use SMBreadX to get the rest of one fragment's worth of rpc data.
199  Reads the whole size or gives an error message
200  ********************************************************************/
201
202 struct rpc_read_state {
203         struct event_context *ev;
204         struct rpc_cli_transport *transport;
205         uint8_t *data;
206         size_t size;
207         size_t num_read;
208 };
209
210 static void rpc_read_done(struct async_req *subreq);
211
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213                                         struct event_context *ev,
214                                         struct rpc_cli_transport *transport,
215                                         uint8_t *data, size_t size)
216 {
217         struct tevent_req *req;
218         struct async_req *subreq;
219         struct rpc_read_state *state;
220
221         req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
222         if (req == NULL) {
223                 return NULL;
224         }
225         state->ev = ev;
226         state->transport = transport;
227         state->data = data;
228         state->size = size;
229         state->num_read = 0;
230
231         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
232
233         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
234                                       transport->priv);
235         if (subreq == NULL) {
236                 goto fail;
237         }
238         subreq->async.fn = rpc_read_done;
239         subreq->async.priv = req;
240         return req;
241
242  fail:
243         TALLOC_FREE(req);
244         return NULL;
245 }
246
247 static void rpc_read_done(struct async_req *subreq)
248 {
249         struct tevent_req *req = talloc_get_type_abort(
250                 subreq->async.priv, struct tevent_req);
251         struct rpc_read_state *state = tevent_req_data(
252                 req, struct rpc_read_state);
253         NTSTATUS status;
254         ssize_t received;
255
256         status = state->transport->read_recv(subreq, &received);
257         TALLOC_FREE(subreq);
258         if (!NT_STATUS_IS_OK(status)) {
259                 tevent_req_nterror(req, status);
260                 return;
261         }
262
263         state->num_read += received;
264         if (state->num_read == state->size) {
265                 tevent_req_done(req);
266                 return;
267         }
268
269         subreq = state->transport->read_send(state, state->ev,
270                                              state->data + state->num_read,
271                                              state->size - state->num_read,
272                                              state->transport->priv);
273         if (tevent_req_nomem(subreq, req)) {
274                 return;
275         }
276         subreq->async.fn = rpc_read_done;
277         subreq->async.priv = req;
278 }
279
280 static NTSTATUS rpc_read_recv(struct tevent_req *req)
281 {
282         return tevent_req_simple_recv_ntstatus(req);
283 }
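/*
 * Illustrative sketch, not part of the original file: how a caller outside
 * an async chain could drive the rpc_read_send/rpc_read_recv pair above
 * with a private event context. The helper name rpc_read_sync and the use
 * of tevent_req_poll() and map_nt_error_from_unix() are assumptions made
 * for this example only.
 */
#if 0
static NTSTATUS rpc_read_sync(struct rpc_cli_transport *transport,
                              uint8_t *data, size_t size)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        ev = event_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }
        req = rpc_read_send(frame, ev, transport, data, size);
        if (req == NULL) {
                goto fail;
        }
        /* Run the event loop until the request has completed or failed. */
        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto fail;
        }
        status = rpc_read_recv(req);
 fail:
        TALLOC_FREE(frame);
        return status;
}
#endif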
284
285 struct rpc_write_state {
286         struct event_context *ev;
287         struct rpc_cli_transport *transport;
288         const uint8_t *data;
289         size_t size;
290         size_t num_written;
291 };
292
293 static void rpc_write_done(struct async_req *subreq);
294
295 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
296                                          struct event_context *ev,
297                                          struct rpc_cli_transport *transport,
298                                          const uint8_t *data, size_t size)
299 {
300         struct tevent_req *req;
301         struct async_req *subreq;
302         struct rpc_write_state *state;
303
304         req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
305         if (req == NULL) {
306                 return NULL;
307         }
308         state->ev = ev;
309         state->transport = transport;
310         state->data = data;
311         state->size = size;
312         state->num_written = 0;
313
314         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
315
316         subreq = transport->write_send(state, ev, data, size, transport->priv);
317         if (subreq == NULL) {
318                 goto fail;
319         }
320         subreq->async.fn = rpc_write_done;
321         subreq->async.priv = req;
322         return req;
323  fail:
324         TALLOC_FREE(req);
325         return NULL;
326 }
327
328 static void rpc_write_done(struct async_req *subreq)
329 {
330         struct tevent_req *req = talloc_get_type_abort(
331                 subreq->async.priv, struct tevent_req);
332         struct rpc_write_state *state = tevent_req_data(
333                 req, struct rpc_write_state);
334         NTSTATUS status;
335         ssize_t written;
336
337         status = state->transport->write_recv(subreq, &written);
338         TALLOC_FREE(subreq);
339         if (!NT_STATUS_IS_OK(status)) {
340                 tevent_req_nterror(req, status);
341                 return;
342         }
343
344         state->num_written += written;
345
346         if (state->num_written == state->size) {
347                 tevent_req_done(req);
348                 return;
349         }
350
351         subreq = state->transport->write_send(state, state->ev,
352                                               state->data + state->num_written,
353                                               state->size - state->num_written,
354                                               state->transport->priv);
355         if (tevent_req_nomem(subreq, req)) {
356                 return;
357         }
358         subreq->async.fn = rpc_write_done;
359         subreq->async.priv = req;
360 }
361
362 static NTSTATUS rpc_write_recv(struct tevent_req *req)
363 {
364         return tevent_req_simple_recv_ntstatus(req);
365 }
366
367
368 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
369                                  struct rpc_hdr_info *prhdr,
370                                  prs_struct *pdu)
371 {
372         /*
373          * This next call sets the endian bit correctly in current_pdu. We
374          * will propagate this to rbuf later.
375          */
376
377         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
378                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
379                 return NT_STATUS_BUFFER_TOO_SMALL;
380         }
381
382         if (prhdr->frag_len > cli->max_recv_frag) {
383                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
384                           " we only allow %d\n", (int)prhdr->frag_len,
385                           (int)cli->max_recv_frag));
386                 return NT_STATUS_BUFFER_TOO_SMALL;
387         }
388
389         return NT_STATUS_OK;
390 }
391
392 /****************************************************************************
393  Try and get a PDU's worth of data from current_pdu. If not, then read more
394  from the wire.
395  ****************************************************************************/
396
397 struct get_complete_frag_state {
398         struct event_context *ev;
399         struct rpc_pipe_client *cli;
400         struct rpc_hdr_info *prhdr;
401         prs_struct *pdu;
402 };
403
404 static void get_complete_frag_got_header(struct tevent_req *subreq);
405 static void get_complete_frag_got_rest(struct tevent_req *subreq);
406
407 static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
408                                                  struct event_context *ev,
409                                                  struct rpc_pipe_client *cli,
410                                                  struct rpc_hdr_info *prhdr,
411                                                  prs_struct *pdu)
412 {
413         struct tevent_req *req, *subreq;
414         struct get_complete_frag_state *state;
415         uint32_t pdu_len;
416         NTSTATUS status;
417
418         req = tevent_req_create(mem_ctx, &state,
419                                 struct get_complete_frag_state);
420         if (req == NULL) {
421                 return NULL;
422         }
423         state->ev = ev;
424         state->cli = cli;
425         state->prhdr = prhdr;
426         state->pdu = pdu;
427
428         pdu_len = prs_data_size(pdu);
429         if (pdu_len < RPC_HEADER_LEN) {
430                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
431                         status = NT_STATUS_NO_MEMORY;
432                         goto post_status;
433                 }
434                 subreq = rpc_read_send(
435                         state, state->ev,
436                         state->cli->transport,
437                         (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
438                         RPC_HEADER_LEN - pdu_len);
439                 if (subreq == NULL) {
440                         status = NT_STATUS_NO_MEMORY;
441                         goto post_status;
442                 }
443                 tevent_req_set_callback(subreq, get_complete_frag_got_header,
444                                         req);
445                 return req;
446         }
447
448         status = parse_rpc_header(cli, prhdr, pdu);
449         if (!NT_STATUS_IS_OK(status)) {
450                 goto post_status;
451         }
452
453         /*
454          * Ensure we have frag_len bytes of data.
455          */
456         if (pdu_len < prhdr->frag_len) {
457                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
458                         status = NT_STATUS_NO_MEMORY;
459                         goto post_status;
460                 }
461                 subreq = rpc_read_send(state, state->ev,
462                                        state->cli->transport,
463                                        (uint8_t *)(prs_data_p(pdu) + pdu_len),
464                                        prhdr->frag_len - pdu_len);
465                 if (subreq == NULL) {
466                         status = NT_STATUS_NO_MEMORY;
467                         goto post_status;
468                 }
469                 tevent_req_set_callback(subreq, get_complete_frag_got_rest,
470                                         req);
471                 return req;
472         }
473
474         status = NT_STATUS_OK;
475  post_status:
476         if (NT_STATUS_IS_OK(status)) {
477                 tevent_req_done(req);
478         } else {
479                 tevent_req_nterror(req, status);
480         }
481         return tevent_req_post(req, ev);
482 }
483
484 static void get_complete_frag_got_header(struct tevent_req *subreq)
485 {
486         struct tevent_req *req = tevent_req_callback_data(
487                 subreq, struct tevent_req);
488         struct get_complete_frag_state *state = tevent_req_data(
489                 req, struct get_complete_frag_state);
490         NTSTATUS status;
491
492         status = rpc_read_recv(subreq);
493         TALLOC_FREE(subreq);
494         if (!NT_STATUS_IS_OK(status)) {
495                 tevent_req_nterror(req, status);
496                 return;
497         }
498
499         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
500         if (!NT_STATUS_IS_OK(status)) {
501                 tevent_req_nterror(req, status);
502                 return;
503         }
504
505         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
506                 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
507                 return;
508         }
509
510         /*
511          * We're here in this piece of code because we've read exactly
512          * RPC_HEADER_LEN bytes into state->pdu.
513          */
514
515         subreq = rpc_read_send(
516                 state, state->ev, state->cli->transport,
517                 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
518                 state->prhdr->frag_len - RPC_HEADER_LEN);
519         if (tevent_req_nomem(subreq, req)) {
520                 return;
521         }
522         tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
523 }
524
525 static void get_complete_frag_got_rest(struct tevent_req *subreq)
526 {
527         struct tevent_req *req = tevent_req_callback_data(
528                 subreq, struct tevent_req);
529         NTSTATUS status;
530
531         status = rpc_read_recv(subreq);
532         TALLOC_FREE(subreq);
533         if (!NT_STATUS_IS_OK(status)) {
534                 tevent_req_nterror(req, status);
535                 return;
536         }
537         tevent_req_done(req);
538 }
539
540 static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
541 {
542         return tevent_req_simple_recv_ntstatus(req);
543 }
544
545 /****************************************************************************
546  NTLMSSP specific sign/seal.
547  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
548  In fact I should probably abstract these into identical pieces of code... JRA.
549  ****************************************************************************/
550
551 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
552                                 prs_struct *current_pdu,
553                                 uint8 *p_ss_padding_len)
554 {
555         RPC_HDR_AUTH auth_info;
556         uint32 save_offset = prs_offset(current_pdu);
557         uint32 auth_len = prhdr->auth_len;
558         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
559         unsigned char *data = NULL;
560         size_t data_len;
561         unsigned char *full_packet_data = NULL;
562         size_t full_packet_data_len;
563         DATA_BLOB auth_blob;
564         NTSTATUS status;
565
566         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
567             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
568                 return NT_STATUS_OK;
569         }
570
571         if (!ntlmssp_state) {
572                 return NT_STATUS_INVALID_PARAMETER;
573         }
574
575         /* Ensure there's enough data for an authenticated response. */
576         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
577                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
578                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
579                         (unsigned int)auth_len ));
580                 return NT_STATUS_BUFFER_TOO_SMALL;
581         }
582
583         /*
584          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
585          * after the RPC header.
586          * We need to pass the full packet (minus auth len) into the NTLMSSP sign-check and unseal
587          * functions, as NTLMv2 also checks the rpc headers.
588          */
589
590         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
591         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
592
593         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
594         full_packet_data_len = prhdr->frag_len - auth_len;
595
596         /* Pull the auth header and the following data into a blob. */
597         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
598                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
599                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
600                 return NT_STATUS_BUFFER_TOO_SMALL;
601         }
602
603         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
604                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
605                 return NT_STATUS_BUFFER_TOO_SMALL;
606         }
607
608         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
609         auth_blob.length = auth_len;
610
611         switch (cli->auth->auth_level) {
612                 case PIPE_AUTH_LEVEL_PRIVACY:
613                         /* Data is encrypted. */
614                         status = ntlmssp_unseal_packet(ntlmssp_state,
615                                                         data, data_len,
616                                                         full_packet_data,
617                                                         full_packet_data_len,
618                                                         &auth_blob);
619                         if (!NT_STATUS_IS_OK(status)) {
620                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
621                                         "packet from %s. Error was %s.\n",
622                                         rpccli_pipe_txt(debug_ctx(), cli),
623                                         nt_errstr(status) ));
624                                 return status;
625                         }
626                         break;
627                 case PIPE_AUTH_LEVEL_INTEGRITY:
628                         /* Data is signed. */
629                         status = ntlmssp_check_packet(ntlmssp_state,
630                                                         data, data_len,
631                                                         full_packet_data,
632                                                         full_packet_data_len,
633                                                         &auth_blob);
634                         if (!NT_STATUS_IS_OK(status)) {
635                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
636                                         "packet from %s. Error was %s.\n",
637                                         rpccli_pipe_txt(debug_ctx(), cli),
638                                         nt_errstr(status) ));
639                                 return status;
640                         }
641                         break;
642                 default:
643                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
644                                   "auth level %d\n", cli->auth->auth_level));
645                         return NT_STATUS_INVALID_INFO_CLASS;
646         }
647
648         /*
649          * Return the current pointer to the data offset.
650          */
651
652         if(!prs_set_offset(current_pdu, save_offset)) {
653                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
654                         (unsigned int)save_offset ));
655                 return NT_STATUS_BUFFER_TOO_SMALL;
656         }
657
658         /*
659          * Remember the padding length. We must remove it from the real data
660          * stream once the sign/seal is done.
661          */
662
663         *p_ss_padding_len = auth_info.auth_pad_len;
664
665         return NT_STATUS_OK;
666 }
667
668 /****************************************************************************
669  schannel specific sign/seal.
670  ****************************************************************************/
671
672 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
673                                 prs_struct *current_pdu,
674                                 uint8 *p_ss_padding_len)
675 {
676         RPC_HDR_AUTH auth_info;
677         RPC_AUTH_SCHANNEL_CHK schannel_chk;
678         uint32 auth_len = prhdr->auth_len;
679         uint32 save_offset = prs_offset(current_pdu);
680         struct schannel_auth_struct *schannel_auth =
681                 cli->auth->a_u.schannel_auth;
682         uint32 data_len;
683
684         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
685             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
686                 return NT_STATUS_OK;
687         }
688
689         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
690                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
691                 return NT_STATUS_INVALID_PARAMETER;
692         }
693
694         if (!schannel_auth) {
695                 return NT_STATUS_INVALID_PARAMETER;
696         }
697
698         /* Ensure there's enough data for an authenticated response. */
699         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
700                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
701                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
702                         (unsigned int)auth_len ));
703                 return NT_STATUS_INVALID_PARAMETER;
704         }
705
706         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
707
708         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
709                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
710                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
711                 return NT_STATUS_BUFFER_TOO_SMALL;
712         }
713
714         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
715                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
716                 return NT_STATUS_BUFFER_TOO_SMALL;
717         }
718
719         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
720                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
721                         auth_info.auth_type));
722                 return NT_STATUS_BUFFER_TOO_SMALL;
723         }
724
725         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
726                                 &schannel_chk, current_pdu, 0)) {
727                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
728                 return NT_STATUS_BUFFER_TOO_SMALL;
729         }
730
731         if (!schannel_decode(schannel_auth,
732                         cli->auth->auth_level,
733                         SENDER_IS_ACCEPTOR,
734                         &schannel_chk,
735                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
736                         data_len)) {
737                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
738                                 "Connection to %s.\n",
739                                 rpccli_pipe_txt(debug_ctx(), cli)));
740                 return NT_STATUS_INVALID_PARAMETER;
741         }
742
743         /* The sequence number gets incremented on both send and receive. */
744         schannel_auth->seq_num++;
745
746         /*
747          * Return the current pointer to the data offset.
748          */
749
750         if(!prs_set_offset(current_pdu, save_offset)) {
751                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
752                         (unsigned int)save_offset ));
753                 return NT_STATUS_BUFFER_TOO_SMALL;
754         }
755
756         /*
757          * Remember the padding length. We must remove it from the real data
758          * stream once the sign/seal is done.
759          */
760
761         *p_ss_padding_len = auth_info.auth_pad_len;
762
763         return NT_STATUS_OK;
764 }
765
766 /****************************************************************************
767  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
768  ****************************************************************************/
769
770 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
771                                 prs_struct *current_pdu,
772                                 uint8 *p_ss_padding_len)
773 {
774         NTSTATUS ret = NT_STATUS_OK;
775
776         /* Paranoia checks for auth_len. */
777         if (prhdr->auth_len) {
778                 if (prhdr->auth_len > prhdr->frag_len) {
779                         return NT_STATUS_INVALID_PARAMETER;
780                 }
781
782                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
783                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
784                         /* Integer wrap attempt. */
785                         return NT_STATUS_INVALID_PARAMETER;
786                 }
787         }
788
789         /*
790          * Now we have a complete RPC response PDU fragment, try to verify any auth data.
791          */
792
793         switch(cli->auth->auth_type) {
794                 case PIPE_AUTH_TYPE_NONE:
795                         if (prhdr->auth_len) {
796                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
797                                           "Connection to %s - got non-zero "
798                                           "auth len %u.\n",
799                                         rpccli_pipe_txt(debug_ctx(), cli),
800                                         (unsigned int)prhdr->auth_len ));
801                                 return NT_STATUS_INVALID_PARAMETER;
802                         }
803                         break;
804
805                 case PIPE_AUTH_TYPE_NTLMSSP:
806                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
807                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
808                         if (!NT_STATUS_IS_OK(ret)) {
809                                 return ret;
810                         }
811                         break;
812
813                 case PIPE_AUTH_TYPE_SCHANNEL:
814                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
815                         if (!NT_STATUS_IS_OK(ret)) {
816                                 return ret;
817                         }
818                         break;
819
820                 case PIPE_AUTH_TYPE_KRB5:
821                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
822                 default:
823                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
824                                   "to %s - unknown internal auth type %u.\n",
825                                   rpccli_pipe_txt(debug_ctx(), cli),
826                                   cli->auth->auth_type ));
827                         return NT_STATUS_INVALID_INFO_CLASS;
828         }
829
830         return NT_STATUS_OK;
831 }
832
833 /****************************************************************************
834  Do basic authentication checks on an incoming pdu.
835  ****************************************************************************/
836
837 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
838                         prs_struct *current_pdu,
839                         uint8 expected_pkt_type,
840                         char **ppdata,
841                         uint32 *pdata_len,
842                         prs_struct *return_data)
843 {
844
845         NTSTATUS ret = NT_STATUS_OK;
846         uint32 current_pdu_len = prs_data_size(current_pdu);
847
848         if (current_pdu_len != prhdr->frag_len) {
849                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
850                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
851                 return NT_STATUS_INVALID_PARAMETER;
852         }
853
854         /*
855          * Point the return values at the real data including the RPC
856          * header. Just in case the caller wants it.
857          */
858         *ppdata = prs_data_p(current_pdu);
859         *pdata_len = current_pdu_len;
860
861         /* Ensure we have the correct type. */
862         switch (prhdr->pkt_type) {
863                 case RPC_ALTCONTRESP:
864                 case RPC_BINDACK:
865
866                         /* Alter context and bind ack share the same packet definitions. */
867                         break;
868
869
870                 case RPC_RESPONSE:
871                 {
872                         RPC_HDR_RESP rhdr_resp;
873                         uint8 ss_padding_len = 0;
874
875                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
876                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
877                                 return NT_STATUS_BUFFER_TOO_SMALL;
878                         }
879
880                         /* Here's where we deal with incoming sign/seal. */
881                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
882                                         current_pdu, &ss_padding_len);
883                         if (!NT_STATUS_IS_OK(ret)) {
884                                 return ret;
885                         }
886
887                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
888                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
889
890                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
891                                 return NT_STATUS_BUFFER_TOO_SMALL;
892                         }
893
894                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
895
896                         /* Remember to remove the auth footer. */
897                         if (prhdr->auth_len) {
898                                 /* We've already done integer wrap tests on auth_len in
899                                         cli_pipe_validate_rpc_response(). */
900                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
901                                         return NT_STATUS_BUFFER_TOO_SMALL;
902                                 }
903                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
904                         }
905
906                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
907                                 current_pdu_len, *pdata_len, ss_padding_len ));
908
909                         /*
910                          * If this is the first reply, and the allocation hint is reasonable, try to
911                          * set up the return_data parse_struct to the correct size.
912                          */
913
914                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
915                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
916                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
917                                                 "too large to allocate\n",
918                                                 (unsigned int)rhdr_resp.alloc_hint ));
919                                         return NT_STATUS_NO_MEMORY;
920                                 }
921                         }
922
923                         break;
924                 }
925
926                 case RPC_BINDNACK:
927                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
928                                   "received from %s!\n",
929                                   rpccli_pipe_txt(debug_ctx(), cli)));
930                         /* Use this for now... */
931                         return NT_STATUS_NETWORK_ACCESS_DENIED;
932
933                 case RPC_FAULT:
934                 {
935                         RPC_HDR_RESP rhdr_resp;
936                         RPC_HDR_FAULT fault_resp;
937
938                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
939                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
940                                 return NT_STATUS_BUFFER_TOO_SMALL;
941                         }
942
943                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
944                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
945                                 return NT_STATUS_BUFFER_TOO_SMALL;
946                         }
947
948                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
949                                   "code %s received from %s!\n",
950                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
951                                 rpccli_pipe_txt(debug_ctx(), cli)));
952                         if (NT_STATUS_IS_OK(fault_resp.status)) {
953                                 return NT_STATUS_UNSUCCESSFUL;
954                         } else {
955                                 return fault_resp.status;
956                         }
957                 }
958
959                 default:
960                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
961                                 "from %s!\n",
962                                 (unsigned int)prhdr->pkt_type,
963                                 rpccli_pipe_txt(debug_ctx(), cli)));
964                         return NT_STATUS_INVALID_INFO_CLASS;
965         }
966
967         if (prhdr->pkt_type != expected_pkt_type) {
968                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
969                           "got an unexpected RPC packet type - %u, not %u\n",
970                         rpccli_pipe_txt(debug_ctx(), cli),
971                         prhdr->pkt_type,
972                         expected_pkt_type));
973                 return NT_STATUS_INVALID_INFO_CLASS;
974         }
975
976         /* Do this just before return - we don't want to modify any rpc header
977            data before now as we may have needed to do cryptographic actions on
978            it before. */
979
980         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
981                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
982                         "setting fragment first/last ON.\n"));
983                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
984         }
985
986         return NT_STATUS_OK;
987 }
988
989 /****************************************************************************
990  Ensure we eat the just processed pdu from the current_pdu prs_struct.
991  Normally the frag_len and buffer size will match, but on the first trans
992  reply there is a theoretical chance that buffer size > frag_len, so we must
993  deal with that.
994  ****************************************************************************/
995
996 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
997 {
998         uint32 current_pdu_len = prs_data_size(current_pdu);
999
1000         if (current_pdu_len < prhdr->frag_len) {
1001                 return NT_STATUS_BUFFER_TOO_SMALL;
1002         }
1003
1004         /* Common case. */
1005         if (current_pdu_len == (uint32)prhdr->frag_len) {
1006                 prs_mem_free(current_pdu);
1007                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1008                 /* Make current_pdu dynamic with no memory. */
1009                 prs_give_memory(current_pdu, 0, 0, True);
1010                 return NT_STATUS_OK;
1011         }
1012
1013         /*
1014          * Oh no ! More data in buffer than we processed in current pdu.
1015          * Cheat. Move the data down and shrink the buffer.
1016          */
1017
1018         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1019                         current_pdu_len - prhdr->frag_len);
1020
1021         /* Remember to set the read offset back to zero. */
1022         prs_set_offset(current_pdu, 0);
1023
1024         /* Shrink the buffer. */
1025         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1026                 return NT_STATUS_BUFFER_TOO_SMALL;
1027         }
1028
1029         return NT_STATUS_OK;
1030 }
1031
1032 /****************************************************************************
1033  Call a remote api on an arbitrary pipe. Sends the outgoing data and collects the start of the server's reply.
1034 ****************************************************************************/
1035
1036 struct cli_api_pipe_state {
1037         struct event_context *ev;
1038         struct rpc_cli_transport *transport;
1039         uint8_t *rdata;
1040         uint32_t rdata_len;
1041 };
1042
1043 static void cli_api_pipe_trans_done(struct async_req *subreq);
1044 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1045 static void cli_api_pipe_read_done(struct async_req *subreq);
1046
1047 static struct tevent_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1048                                             struct event_context *ev,
1049                                             struct rpc_cli_transport *transport,
1050                                             uint8_t *data, size_t data_len,
1051                                             uint32_t max_rdata_len)
1052 {
1053         struct tevent_req *req;
1054         struct async_req *subreq;
1055         struct tevent_req *subreq2;
1056         struct cli_api_pipe_state *state;
1057         NTSTATUS status;
1058
1059         req = tevent_req_create(mem_ctx, &state, struct cli_api_pipe_state);
1060         if (req == NULL) {
1061                 return NULL;
1062         }
1063         state->ev = ev;
1064         state->transport = transport;
1065
1066         if (max_rdata_len < RPC_HEADER_LEN) {
1067                 /*
1068                  * For an RPC reply we always need at least RPC_HEADER_LEN
1069                  * bytes. We check this here because the write/read fallback
1070                  * below reads exactly RPC_HEADER_LEN bytes in cli_api_pipe_write_done.
1071                  */
1072                 status = NT_STATUS_INVALID_PARAMETER;
1073                 goto post_status;
1074         }
1075
1076         if (transport->trans_send != NULL) {
1077                 subreq = transport->trans_send(state, ev, data, data_len,
1078                                                max_rdata_len, transport->priv);
1079                 if (subreq == NULL) {
1080                         status = NT_STATUS_NO_MEMORY;
1081                         goto post_status;
1082                 }
1083                 subreq->async.fn = cli_api_pipe_trans_done;
1084                 subreq->async.priv = req;
1085                 return req;
1086         }
1087
1088         /*
1089          * If the transport does not provide a "trans" routine (for
1090          * example the ncacn_ip_tcp transport), do the write/read step here.
1091          */
1092
1093         subreq2 = rpc_write_send(state, ev, transport, data, data_len);
1094         if (subreq2 == NULL) {
1095                 goto fail;
1096         }
1097         tevent_req_set_callback(subreq2, cli_api_pipe_write_done, req);
1098         return req;
1099
1100         status = NT_STATUS_INVALID_PARAMETER;
1103         if (NT_STATUS_IS_OK(status)) {
1104                 tevent_req_done(req);
1105         } else {
1106                 tevent_req_nterror(req, status);
1107         }
1108         return tevent_req_post(req, ev);
1109  fail:
1110         TALLOC_FREE(req);
1111         return NULL;
1112 }
1113
1114 static void cli_api_pipe_trans_done(struct async_req *subreq)
1115 {
1116         struct tevent_req *req = talloc_get_type_abort(
1117                 subreq->async.priv, struct tevent_req);
1118         struct cli_api_pipe_state *state = tevent_req_data(
1119                 req, struct cli_api_pipe_state);
1120         NTSTATUS status;
1121
1122         status = state->transport->trans_recv(subreq, state, &state->rdata,
1123                                               &state->rdata_len);
1124         TALLOC_FREE(subreq);
1125         if (!NT_STATUS_IS_OK(status)) {
1126                 tevent_req_nterror(req, status);
1127                 return;
1128         }
1129         tevent_req_done(req);
1130 }
1131
1132 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1133 {
1134         struct tevent_req *req = tevent_req_callback_data(
1135                 subreq, struct tevent_req);
1136         struct cli_api_pipe_state *state = tevent_req_data(
1137                 req, struct cli_api_pipe_state);
1138         struct async_req *subreq2;
1139         NTSTATUS status;
1140
1141         status = rpc_write_recv(subreq);
1142         TALLOC_FREE(subreq);
1143         if (!NT_STATUS_IS_OK(status)) {
1144                 tevent_req_nterror(req, status);
1145                 return;
1146         }
1147
1148         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1149         if (tevent_req_nomem(state->rdata, req)) {
1150                 return;
1151         }
1152
1153         /*
1154          * We don't need to use rpc_read_send here: the upper layer will cope
1155          * with a short read; transport->trans_send could also return less
1156          * than the requested max_rdata_len.
1157          */
1158         subreq2 = state->transport->read_send(state, state->ev, state->rdata,
1159                                               RPC_HEADER_LEN,
1160                                               state->transport->priv);
1161         if (tevent_req_nomem(subreq2, req)) {
1162                 return;
1163         }
1164         subreq2->async.fn = cli_api_pipe_read_done;
1165         subreq2->async.priv = req;
1166 }
1167
1168 static void cli_api_pipe_read_done(struct async_req *subreq)
1169 {
1170         struct tevent_req *req = talloc_get_type_abort(
1171                 subreq->async.priv, struct tevent_req);
1172         struct cli_api_pipe_state *state = tevent_req_data(
1173                 req, struct cli_api_pipe_state);
1174         NTSTATUS status;
1175         ssize_t received;
1176
1177         status = state->transport->read_recv(subreq, &received);
1178         TALLOC_FREE(subreq);
1179         if (!NT_STATUS_IS_OK(status)) {
1180                 tevent_req_nterror(req, status);
1181                 return;
1182         }
1183         state->rdata_len = received;
1184         tevent_req_done(req);
1185 }
1186
1187 static NTSTATUS cli_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1188                                   uint8_t **prdata, uint32_t *prdata_len)
1189 {
1190         struct cli_api_pipe_state *state = tevent_req_data(
1191                 req, struct cli_api_pipe_state);
1192         NTSTATUS status;
1193
1194         if (tevent_req_is_nterror(req, &status)) {
1195                 return status;
1196         }
1197
1198         *prdata = talloc_move(mem_ctx, &state->rdata);
1199         *prdata_len = state->rdata_len;
1200         return NT_STATUS_OK;
1201 }
1202
1203 /****************************************************************************
1204  Send data on an rpc pipe via trans. The prs_struct data must be the last
1205  pdu fragment of an NDR data stream.
1206
1207  Receive response data from an rpc pipe, which may be large...
1208
1209  Read the first fragment: unfortunately have to use SMBtrans for the first
1210  bit, then SMBreadX for subsequent bits.
1211
1212  If first fragment received also wasn't the last fragment, continue
1213  If the first fragment received wasn't also the last fragment, continue
1214
1215  Request/Response PDU's look like the following...
1216
1217  |<------------------PDU len----------------------------------------------->|
1218  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1219
1220  +------------+-----------------+-------------+---------------+-------------+
1221  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1222  +------------+-----------------+-------------+---------------+-------------+
1223
1224  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1225  signing & sealing being negotiated.
1226
1227  ****************************************************************************/
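/*
 * Worked example, with illustrative numbers not taken from the original:
 * for a sign-only NTLMSSP response carrying 100 bytes of NDR data and a
 * 16 byte verifier, the fragment on the wire is
 *
 *   frag_len = RPC_HEADER_LEN + RPC_HDR_RESP_LEN + 100 (data)
 *            + auth_pad_len + RPC_HDR_AUTH_LEN + 16 (auth_len)
 *
 * This is the same arithmetic that cli_pipe_verify_ntlmssp() and
 * cli_pipe_verify_schannel() above use to locate the data and auth blobs
 * within a fragment.
 */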
1228
1229 struct rpc_api_pipe_state {
1230         struct event_context *ev;
1231         struct rpc_pipe_client *cli;
1232         uint8_t expected_pkt_type;
1233
1234         prs_struct incoming_frag;
1235         struct rpc_hdr_info rhdr;
1236
1237         prs_struct incoming_pdu;        /* Incoming reply */
1238         uint32_t incoming_pdu_offset;
1239 };
1240
1241 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1242 {
1243         prs_mem_free(&state->incoming_frag);
1244         prs_mem_free(&state->incoming_pdu);
1245         return 0;
1246 }
1247
1248 static void rpc_api_pipe_trans_done(struct tevent_req *subreq);
1249 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
1250
1251 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1252                                            struct event_context *ev,
1253                                            struct rpc_pipe_client *cli,
1254                                            prs_struct *data, /* Outgoing PDU */
1255                                            uint8_t expected_pkt_type)
1256 {
1257         struct async_req *result;
1258         struct tevent_req *subreq;
1259         struct rpc_api_pipe_state *state;
1260         uint16_t max_recv_frag;
1261         NTSTATUS status;
1262
1263         if (!async_req_setup(mem_ctx, &result, &state,
1264                              struct rpc_api_pipe_state)) {
1265                 return NULL;
1266         }
1267         state->ev = ev;
1268         state->cli = cli;
1269         state->expected_pkt_type = expected_pkt_type;
1270         state->incoming_pdu_offset = 0;
1271
1272         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1273
1274         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1275         /* Make incoming_pdu dynamic with no memory. */
1276         prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1277
1278         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1279
1280         /*
1281          * Ensure we're not sending too much.
1282          */
1283         if (prs_offset(data) > cli->max_xmit_frag) {
1284                 status = NT_STATUS_INVALID_PARAMETER;
1285                 goto post_status;
1286         }
1287
1288         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1289
1290         max_recv_frag = cli->max_recv_frag;
1291
1292 #ifdef DEVELOPER
1293         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1294 #endif
1295
1296         subreq = cli_api_pipe_send(state, ev, cli->transport,
1297                                    (uint8_t *)prs_data_p(data),
1298                                    prs_offset(data), max_recv_frag);
1299         if (subreq == NULL) {
1300                 status = NT_STATUS_NO_MEMORY;
1301                 goto post_status;
1302         }
1303         tevent_req_set_callback(subreq, rpc_api_pipe_trans_done, result);
1304         return result;
1305
1306  post_status:
1307         if (async_post_ntstatus(result, ev, status)) {
1308                 return result;
1309         }
1310         TALLOC_FREE(result);
1311         return NULL;
1312 }
1313
1314 static void rpc_api_pipe_trans_done(struct tevent_req *subreq)
1315 {
1316         struct async_req *req = tevent_req_callback_data(
1317                 subreq, struct async_req);
1318         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1319                 req->private_data, struct rpc_api_pipe_state);
1320         NTSTATUS status;
1321         uint8_t *rdata = NULL;
1322         uint32_t rdata_len = 0;
1323         char *rdata_copy;
1324
1325         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1326         TALLOC_FREE(subreq);
1327         if (!NT_STATUS_IS_OK(status)) {
1328                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1329                 async_req_nterror(req, status);
1330                 return;
1331         }
1332
1333         if (rdata == NULL) {
1334                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1335                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1336                 async_req_done(req);
1337                 return;
1338         }
1339
1340         /*
1341          * Give the memory received from cli_trans as dynamic to the current
1342          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1343          * :-(
1344          */
1345         rdata_copy = (char *)memdup(rdata, rdata_len);
1346         TALLOC_FREE(rdata);
1347         if (async_req_nomem(rdata_copy, req)) {
1348                 return;
1349         }
1350         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1351
1352         /* Ensure we have enough data for a pdu. */
1353         subreq = get_complete_frag_send(state, state->ev, state->cli,
1354                                         &state->rhdr, &state->incoming_frag);
1355         if (async_req_nomem(subreq, req)) {
1356                 return;
1357         }
1358         tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
1359 }
1360
1361 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
1362 {
1363         struct async_req *req = tevent_req_callback_data(
1364                 subreq, struct async_req);
1365         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1366                 req->private_data, struct rpc_api_pipe_state);
1367         NTSTATUS status;
1368         char *rdata = NULL;
1369         uint32_t rdata_len = 0;
1370
1371         status = get_complete_frag_recv(subreq);
1372         TALLOC_FREE(subreq);
1373         if (!NT_STATUS_IS_OK(status)) {
1374                 DEBUG(5, ("get_complete_frag failed: %s\n",
1375                           nt_errstr(status)));
1376                 async_req_nterror(req, status);
1377                 return;
1378         }
1379
1380         status = cli_pipe_validate_current_pdu(
1381                 state->cli, &state->rhdr, &state->incoming_frag,
1382                 state->expected_pkt_type, &rdata, &rdata_len,
1383                 &state->incoming_pdu);
1384
1385         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1386                   (unsigned)prs_data_size(&state->incoming_frag),
1387                   (unsigned)state->incoming_pdu_offset,
1388                   nt_errstr(status)));
1389
1390         if (!NT_STATUS_IS_OK(status)) {
1391                 async_req_nterror(req, status);
1392                 return;
1393         }
1394
1395         if ((state->rhdr.flags & RPC_FLG_FIRST)
1396             && (state->rhdr.pack_type[0] == 0)) {
1397                 /*
1398                  * Set the data type correctly for big-endian data on the
1399                  * first packet.
1400                  */
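                /*
                 * The first byte of the DCE/RPC data representation label
                 * encodes integer byte order in its high nibble: 1 means
                 * little-endian, 0 means big-endian. So pack_type[0] == 0 on
                 * the first fragment indicates a big-endian sender.
                 */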
1401                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1402                           "big-endian.\n",
1403                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1404                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1405         }
1406         /*
1407          * Check endianness on subsequent packets.
1408          */
1409         if (state->incoming_frag.bigendian_data
1410             != state->incoming_pdu.bigendian_data) {
1411                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1412                          "%s\n",
1413                          state->incoming_pdu.bigendian_data?"big":"little",
1414                          state->incoming_frag.bigendian_data?"big":"little"));
1415                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1416                 return;
1417         }
1418
1419         /* Now copy the data portion out of the pdu into rbuf. */
1420         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1421                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1422                 return;
1423         }
1424
1425         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1426                rdata, (size_t)rdata_len);
1427         state->incoming_pdu_offset += rdata_len;
1428
1429         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1430                                             &state->incoming_frag);
1431         if (!NT_STATUS_IS_OK(status)) {
1432                 async_req_nterror(req, status);
1433                 return;
1434         }
1435
1436         if (state->rhdr.flags & RPC_FLG_LAST) {
1437                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1438                           rpccli_pipe_txt(debug_ctx(), state->cli),
1439                           (unsigned)prs_data_size(&state->incoming_pdu)));
1440                 async_req_done(req);
1441                 return;
1442         }
1443
1444         subreq = get_complete_frag_send(state, state->ev, state->cli,
1445                                         &state->rhdr, &state->incoming_frag);
1446         if (async_req_nomem(subreq, req)) {
1447                 return;
1448         }
1449         tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
1450 }
1451
1452 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1453                                   prs_struct *reply_pdu)
1454 {
1455         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1456                 req->private_data, struct rpc_api_pipe_state);
1457         NTSTATUS status;
1458
1459         if (async_req_is_nterror(req, &status)) {
1460                 return status;
1461         }
1462
1463         *reply_pdu = state->incoming_pdu;
1464         reply_pdu->mem_ctx = mem_ctx;
1465
1466         /*
1467          * Prevent state->incoming_pdu from being freed in
1468          * rpc_api_pipe_state_destructor()
1469          */
1470         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1471
1472         return NT_STATUS_OK;
1473 }
1474
1475 /*******************************************************************
1476  Creates krb5 auth bind.
1477  ********************************************************************/
1478
1479 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1480                                                 enum pipe_auth_level auth_level,
1481                                                 RPC_HDR_AUTH *pauth_out,
1482                                                 prs_struct *auth_data)
1483 {
1484 #ifdef HAVE_KRB5
1485         int ret;
1486         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1487         DATA_BLOB tkt = data_blob_null;
1488         DATA_BLOB tkt_wrapped = data_blob_null;
1489
1490         /* We may change the pad length before marshalling. */
1491         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1492
1493         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1494                 a->service_principal ));
1495
1496         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1497
1498         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1499                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1500
1501         if (ret) {
1502                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1503                         "failed with %s\n",
1504                         a->service_principal,
1505                         error_message(ret) ));
1506
1507                 data_blob_free(&tkt);
1508                 prs_mem_free(auth_data);
1509                 return NT_STATUS_INVALID_PARAMETER;
1510         }
1511
1512         /* wrap that up in a nice GSS-API wrapping */
1513         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1514
1515         data_blob_free(&tkt);
1516
1517         /* Auth len in the rpc header doesn't include auth_header. */
1518         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1519                 data_blob_free(&tkt_wrapped);
1520                 prs_mem_free(auth_data);
1521                 return NT_STATUS_NO_MEMORY;
1522         }
1523
1524         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1525         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1526
1527         data_blob_free(&tkt_wrapped);
1528         return NT_STATUS_OK;
1529 #else
1530         return NT_STATUS_INVALID_PARAMETER;
1531 #endif
1532 }
1533
1534 /*******************************************************************
1535  Creates SPNEGO NTLMSSP auth bind.
1536  ********************************************************************/
1537
1538 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1539                                                 enum pipe_auth_level auth_level,
1540                                                 RPC_HDR_AUTH *pauth_out,
1541                                                 prs_struct *auth_data)
1542 {
1543         NTSTATUS nt_status;
1544         DATA_BLOB null_blob = data_blob_null;
1545         DATA_BLOB request = data_blob_null;
1546         DATA_BLOB spnego_msg = data_blob_null;
1547
1548         /* We may change the pad length before marshalling. */
1549         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1550
1551         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1552         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1553                                         null_blob,
1554                                         &request);
1555
1556         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1557                 data_blob_free(&request);
1558                 prs_mem_free(auth_data);
1559                 return nt_status;
1560         }
1561
1562         /* Wrap this in SPNEGO. */
1563         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1564
1565         data_blob_free(&request);
1566
1567         /* Auth len in the rpc header doesn't include auth_header. */
1568         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1569                 data_blob_free(&spnego_msg);
1570                 prs_mem_free(auth_data);
1571                 return NT_STATUS_NO_MEMORY;
1572         }
1573
1574         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1575         dump_data(5, spnego_msg.data, spnego_msg.length);
1576
1577         data_blob_free(&spnego_msg);
1578         return NT_STATUS_OK;
1579 }
1580
1581 /*******************************************************************
1582  Creates NTLMSSP auth bind.
1583  ********************************************************************/
1584
1585 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1586                                                 enum pipe_auth_level auth_level,
1587                                                 RPC_HDR_AUTH *pauth_out,
1588                                                 prs_struct *auth_data)
1589 {
1590         NTSTATUS nt_status;
1591         DATA_BLOB null_blob = data_blob_null;
1592         DATA_BLOB request = data_blob_null;
1593
1594         /* We may change the pad length before marshalling. */
1595         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1596
1597         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1598         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1599                                         null_blob,
1600                                         &request);
1601
1602         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1603                 data_blob_free(&request);
1604                 prs_mem_free(auth_data);
1605                 return nt_status;
1606         }
1607
1608         /* Auth len in the rpc header doesn't include auth_header. */
1609         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1610                 data_blob_free(&request);
1611                 prs_mem_free(auth_data);
1612                 return NT_STATUS_NO_MEMORY;
1613         }
1614
1615         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1616         dump_data(5, request.data, request.length);
1617
1618         data_blob_free(&request);
1619         return NT_STATUS_OK;
1620 }
1621
1622 /*******************************************************************
1623  Creates schannel auth bind.
1624  ********************************************************************/
1625
1626 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1627                                                 enum pipe_auth_level auth_level,
1628                                                 RPC_HDR_AUTH *pauth_out,
1629                                                 prs_struct *auth_data)
1630 {
1631         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1632
1633         /* We may change the pad length before marshalling. */
1634         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1635
1636         /* Use lp_workgroup() if domain not specified */
1637
1638         if (!cli->auth->domain || !cli->auth->domain[0]) {
1639                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1640                 if (cli->auth->domain == NULL) {
1641                         return NT_STATUS_NO_MEMORY;
1642                 }
1643         }
1644
1645         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1646                                    global_myname());
1647
1648         /*
1649          * Now marshall the data into the auth parse_struct.
1650          */
1651
1652         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1653                                        &schannel_neg, auth_data, 0)) {
1654                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1655                 prs_mem_free(auth_data);
1656                 return NT_STATUS_NO_MEMORY;
1657         }
1658
1659         return NT_STATUS_OK;
1660 }
1661
1662 /*******************************************************************
1663  Creates the internals of a DCE/RPC bind request or alter context PDU.
1664  ********************************************************************/
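/*
 * Rough layout of the PDU marshalled below (a sketch; the exact sizes come
 * from the RPC_HEADER_LEN, RPC_HDR_RB_LEN and RPC_HDR_AUTH_LEN values used
 * in the code):
 *
 *   [RPC_HDR] [RPC_HDR_RB incl. context list] [pad to 8-byte boundary]
 *   [RPC_HDR_AUTH] [auth data]
 *
 * The last three pieces are only present for authenticated binds
 * (auth_len != 0); frag_len in the RPC header covers the whole PDU.
 */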
1665
1666 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1667                                                 prs_struct *rpc_out, 
1668                                                 uint32 rpc_call_id,
1669                                                 const RPC_IFACE *abstract,
1670                                                 const RPC_IFACE *transfer,
1671                                                 RPC_HDR_AUTH *phdr_auth,
1672                                                 prs_struct *pauth_info)
1673 {
1674         RPC_HDR hdr;
1675         RPC_HDR_RB hdr_rb;
1676         RPC_CONTEXT rpc_ctx;
1677         uint16 auth_len = prs_offset(pauth_info);
1678         uint8 ss_padding_len = 0;
1679         uint16 frag_len = 0;
1680
1681         /* create the RPC context. */
1682         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1683
1684         /* create the bind request RPC_HDR_RB */
1685         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1686
1687         /* Start building the frag length. */
1688         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1689
1690         /* Do we need to pad ? */
1691         if (auth_len) {
1692                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1693                 if (data_len % 8) {
1694                         ss_padding_len = 8 - (data_len % 8);
1695                         phdr_auth->auth_pad_len = ss_padding_len;
1696                 }
1697                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1698         }
1699
1700         /* Create the request RPC_HDR */
1701         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1702
1703         /* Marshall the RPC header */
1704         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1705                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1706                 return NT_STATUS_NO_MEMORY;
1707         }
1708
1709         /* Marshall the bind request data */
1710         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1711                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1712                 return NT_STATUS_NO_MEMORY;
1713         }
1714
1715         /*
1716          * Grow the outgoing buffer to store any auth info.
1717          */
1718
1719         if(auth_len != 0) {
1720                 if (ss_padding_len) {
1721                         char pad[8];
1722                         memset(pad, '\0', 8);
1723                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1724                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1725                                 return NT_STATUS_NO_MEMORY;
1726                         }
1727                 }
1728
1729                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1730                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1731                         return NT_STATUS_NO_MEMORY;
1732                 }
1733
1734
1735                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1736                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1737                         return NT_STATUS_NO_MEMORY;
1738                 }
1739         }
1740
1741         return NT_STATUS_OK;
1742 }
1743
1744 /*******************************************************************
1745  Creates a DCE/RPC bind request.
1746  ********************************************************************/
1747
1748 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1749                                 prs_struct *rpc_out, 
1750                                 uint32 rpc_call_id,
1751                                 const RPC_IFACE *abstract,
1752                                 const RPC_IFACE *transfer,
1753                                 enum pipe_auth_type auth_type,
1754                                 enum pipe_auth_level auth_level)
1755 {
1756         RPC_HDR_AUTH hdr_auth;
1757         prs_struct auth_info;
1758         NTSTATUS ret = NT_STATUS_OK;
1759
1760         ZERO_STRUCT(hdr_auth);
1761         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1762                 return NT_STATUS_NO_MEMORY;
1763
1764         switch (auth_type) {
1765                 case PIPE_AUTH_TYPE_SCHANNEL:
1766                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1767                         if (!NT_STATUS_IS_OK(ret)) {
1768                                 prs_mem_free(&auth_info);
1769                                 return ret;
1770                         }
1771                         break;
1772
1773                 case PIPE_AUTH_TYPE_NTLMSSP:
1774                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1775                         if (!NT_STATUS_IS_OK(ret)) {
1776                                 prs_mem_free(&auth_info);
1777                                 return ret;
1778                         }
1779                         break;
1780
1781                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1782                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1783                         if (!NT_STATUS_IS_OK(ret)) {
1784                                 prs_mem_free(&auth_info);
1785                                 return ret;
1786                         }
1787                         break;
1788
1789                 case PIPE_AUTH_TYPE_KRB5:
1790                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1791                         if (!NT_STATUS_IS_OK(ret)) {
1792                                 prs_mem_free(&auth_info);
1793                                 return ret;
1794                         }
1795                         break;
1796
1797                 case PIPE_AUTH_TYPE_NONE:
1798                         break;
1799
1800                 default:
1801                         /* "Can't" happen. */
1802                         return NT_STATUS_INVALID_INFO_CLASS;
1803         }
1804
1805         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1806                                                 rpc_out, 
1807                                                 rpc_call_id,
1808                                                 abstract,
1809                                                 transfer,
1810                                                 &hdr_auth,
1811                                                 &auth_info);
1812
1813         prs_mem_free(&auth_info);
1814         return ret;
1815 }
1816
1817 /*******************************************************************
1818  Create and add the NTLMSSP sign/seal auth header and data.
1819  ********************************************************************/
1820
1821 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1822                                         RPC_HDR *phdr,
1823                                         uint32 ss_padding_len,
1824                                         prs_struct *outgoing_pdu)
1825 {
1826         RPC_HDR_AUTH auth_info;
1827         NTSTATUS status;
1828         DATA_BLOB auth_blob = data_blob_null;
1829         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1830
1831         if (!cli->auth->a_u.ntlmssp_state) {
1832                 return NT_STATUS_INVALID_PARAMETER;
1833         }
1834
1835         /* Init and marshall the auth header. */
1836         init_rpc_hdr_auth(&auth_info,
1837                         map_pipe_auth_type_to_rpc_auth_type(
1838                                 cli->auth->auth_type),
1839                         cli->auth->auth_level,
1840                         ss_padding_len,
1841                         1 /* context id. */);
1842
1843         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1844                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1845                 data_blob_free(&auth_blob);
1846                 return NT_STATUS_NO_MEMORY;
1847         }
1848
1849         switch (cli->auth->auth_level) {
1850                 case PIPE_AUTH_LEVEL_PRIVACY:
1851                         /* Data portion is encrypted. */
1852                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1853                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1854                                         data_and_pad_len,
1855                                         (unsigned char *)prs_data_p(outgoing_pdu),
1856                                         (size_t)prs_offset(outgoing_pdu),
1857                                         &auth_blob);
1858                         if (!NT_STATUS_IS_OK(status)) {
1859                                 data_blob_free(&auth_blob);
1860                                 return status;
1861                         }
1862                         break;
1863
1864                 case PIPE_AUTH_LEVEL_INTEGRITY:
1865                         /* Data is signed. */
1866                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1867                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1868                                         data_and_pad_len,
1869                                         (unsigned char *)prs_data_p(outgoing_pdu),
1870                                         (size_t)prs_offset(outgoing_pdu),
1871                                         &auth_blob);
1872                         if (!NT_STATUS_IS_OK(status)) {
1873                                 data_blob_free(&auth_blob);
1874                                 return status;
1875                         }
1876                         break;
1877
1878                 default:
1879                         /* Can't happen. */
1880                         smb_panic("bad auth level");
1881                         /* Notreached. */
1882                         return NT_STATUS_INVALID_PARAMETER;
1883         }
1884
1885         /* Finally marshall the blob. */
1886
1887         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1888                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1889                         (unsigned int)NTLMSSP_SIG_SIZE));
1890                 data_blob_free(&auth_blob);
1891                 return NT_STATUS_NO_MEMORY;
1892         }
1893
1894         data_blob_free(&auth_blob);
1895         return NT_STATUS_OK;
1896 }
1897
1898 /*******************************************************************
1899  Create and add the schannel sign/seal auth header and data.
1900  ********************************************************************/
1901
1902 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1903                                         RPC_HDR *phdr,
1904                                         uint32 ss_padding_len,
1905                                         prs_struct *outgoing_pdu)
1906 {
1907         RPC_HDR_AUTH auth_info;
1908         RPC_AUTH_SCHANNEL_CHK verf;
1909         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1910         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1911         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1912
1913         if (!sas) {
1914                 return NT_STATUS_INVALID_PARAMETER;
1915         }
1916
1917         /* Init and marshall the auth header. */
1918         init_rpc_hdr_auth(&auth_info,
1919                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1920                         cli->auth->auth_level,
1921                         ss_padding_len,
1922                         1 /* context id. */);
1923
1924         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1925                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1926                 return NT_STATUS_NO_MEMORY;
1927         }
1928
1929         switch (cli->auth->auth_level) {
1930                 case PIPE_AUTH_LEVEL_PRIVACY:
1931                 case PIPE_AUTH_LEVEL_INTEGRITY:
1932                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1933                                 sas->seq_num));
1934
1935                         schannel_encode(sas,
1936                                         cli->auth->auth_level,
1937                                         SENDER_IS_INITIATOR,
1938                                         &verf,
1939                                         data_p,
1940                                         data_and_pad_len);
1941
1942                         sas->seq_num++;
1943                         break;
1944
1945                 default:
1946                         /* Can't happen. */
1947                         smb_panic("bad auth level");
1948                         /* Notreached. */
1949                         return NT_STATUS_INVALID_PARAMETER;
1950         }
1951
1952         /* Finally marshall the blob. */
1953         smb_io_rpc_auth_schannel_chk("",
1954                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1955                         &verf,
1956                         outgoing_pdu,
1957                         0);
1958
1959         return NT_STATUS_OK;
1960 }
1961
1962 /*******************************************************************
1963  Calculate how much data we're going to send in this packet, also
1964  work out any sign/seal padding length.
1965  ********************************************************************/
1966
1967 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1968                                         uint32 data_left,
1969                                         uint16 *p_frag_len,
1970                                         uint16 *p_auth_len,
1971                                         uint32 *p_ss_padding)
1972 {
1973         uint32 data_space, data_len;
1974
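        /*
         * In developer builds, randomly halve the data we are willing to put
         * into this fragment so that requests are sometimes split across more
         * fragments than strictly necessary, exercising the multi-fragment
         * send path.
         */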
1975 #ifdef DEVELOPER
1976         if ((data_left > 0) && (sys_random() % 2)) {
1977                 data_left = MAX(data_left/2, 1);
1978         }
1979 #endif
1980
1981         switch (cli->auth->auth_level) {
1982                 case PIPE_AUTH_LEVEL_NONE:
1983                 case PIPE_AUTH_LEVEL_CONNECT:
1984                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1985                         data_len = MIN(data_space, data_left);
1986                         *p_ss_padding = 0;
1987                         *p_auth_len = 0;
1988                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1989                         return data_len;
1990
1991                 case PIPE_AUTH_LEVEL_INTEGRITY:
1992                 case PIPE_AUTH_LEVEL_PRIVACY:
1993                         /* Treat the same for all authenticated rpc requests. */
1994                         switch(cli->auth->auth_type) {
1995                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1996                                 case PIPE_AUTH_TYPE_NTLMSSP:
1997                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1998                                         break;
1999                                 case PIPE_AUTH_TYPE_SCHANNEL:
2000                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2001                                         break;
2002                                 default:
2003                                         smb_panic("bad auth type");
2004                                         break;
2005                         }
2006
2007                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2008                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2009
2010                         data_len = MIN(data_space, data_left);
2011                         *p_ss_padding = 0;
2012                         if (data_len % 8) {
2013                                 *p_ss_padding = 8 - (data_len % 8);
2014                         }
2015                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2016                                         data_len + *p_ss_padding +              /* data plus padding. */
2017                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2018                         return data_len;
2019
2020                 default:
2021                         smb_panic("bad auth level");
2022                         /* Notreached. */
2023                         return 0;
2024         }
2025 }
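/*
 * Worked example (a sketch; the concrete numbers assume the common values
 * RPC_HEADER_LEN = 16, RPC_HDR_REQ_LEN = 8, RPC_HDR_AUTH_LEN = 8,
 * NTLMSSP_SIG_SIZE = 16 and a negotiated max_xmit_frag of 4280):
 *
 *   With NTLMSSP sign/seal, data_space = 4280 - 16 - 8 - 8 - 16 = 4232.
 *   While data_left >= 4232 we send data_len = 4232, which is already a
 *   multiple of 8, so ss_padding = 0 and
 *   frag_len = 16 + 8 + 4232 + 0 + 8 + 16 = 4280 = max_xmit_frag.
 *   A final short fragment of, say, 100 bytes gets ss_padding = 4 and
 *   frag_len = 16 + 8 + 100 + 4 + 8 + 16 = 152.
 */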
2026
2027 /*******************************************************************
2028  External interface.
2029  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2030  Reply is NDR encoded in out_data. Splits the data stream into RPC PDUs
2031  and deals with signing/sealing details.
2032  ********************************************************************/
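/*
 * In outline: prepare_next_frag() marshalls one request fragment into
 * outgoing_frag. Every fragment except the last is pushed with
 * rpc_write_send() and, once written, the next fragment is prepared. The
 * last fragment goes through rpc_api_pipe_send(), which also collects and
 * reassembles the RPC_RESPONSE PDUs into reply_pdu.
 */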
2033
2034 struct rpc_api_pipe_req_state {
2035         struct event_context *ev;
2036         struct rpc_pipe_client *cli;
2037         uint8_t op_num;
2038         uint32_t call_id;
2039         prs_struct *req_data;
2040         uint32_t req_data_sent;
2041         prs_struct outgoing_frag;
2042         prs_struct reply_pdu;
2043 };
2044
2045 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2046 {
2047         prs_mem_free(&s->outgoing_frag);
2048         prs_mem_free(&s->reply_pdu);
2049         return 0;
2050 }
2051
2052 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2053 static void rpc_api_pipe_req_done(struct async_req *subreq);
2054 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2055                                   bool *is_last_frag);
2056
2057 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2058                                         struct event_context *ev,
2059                                         struct rpc_pipe_client *cli,
2060                                         uint8_t op_num,
2061                                         prs_struct *req_data)
2062 {
2063         struct async_req *result, *subreq;
2064         struct tevent_req *subreq2;
2065         struct rpc_api_pipe_req_state *state;
2066         NTSTATUS status;
2067         bool is_last_frag;
2068
2069         if (!async_req_setup(mem_ctx, &result, &state,
2070                              struct rpc_api_pipe_req_state)) {
2071                 return NULL;
2072         }
2073         state->ev = ev;
2074         state->cli = cli;
2075         state->op_num = op_num;
2076         state->req_data = req_data;
2077         state->req_data_sent = 0;
2078         state->call_id = get_rpc_call_id();
2079
2080         if (cli->max_xmit_frag
2081             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2082                 /* Server advertised an unusably small max_xmit_frag. */
2083                 status = NT_STATUS_INVALID_PARAMETER;
2084                 goto post_status;
2085         }
2086
2087         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2088
2089         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2090                       state, MARSHALL)) {
2091                 status = NT_STATUS_NO_MEMORY;
2092                 goto post_status;
2093         }
2094
2095         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2096
2097         status = prepare_next_frag(state, &is_last_frag);
2098         if (!NT_STATUS_IS_OK(status)) {
2099                 goto post_status;
2100         }
2101
2102         if (is_last_frag) {
2103                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2104                                            &state->outgoing_frag,
2105                                            RPC_RESPONSE);
2106                 if (subreq == NULL) {
2107                         status = NT_STATUS_NO_MEMORY;
2108                         goto post_status;
2109                 }
2110                 subreq->async.fn = rpc_api_pipe_req_done;
2111                 subreq->async.priv = result;
2112         } else {
2113                 subreq2 = rpc_write_send(
2114                         state, ev, cli->transport,
2115                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2116                         prs_offset(&state->outgoing_frag));
2117                 if (subreq2 == NULL) {
2118                         status = NT_STATUS_NO_MEMORY;
2119                         goto post_status;
2120                 }
2121                 tevent_req_set_callback(subreq2, rpc_api_pipe_req_write_done,
2122                                         result);
2123         }
2124         return result;
2125
2126  post_status:
2127         if (async_post_ntstatus(result, ev, status)) {
2128                 return result;
2129         }
2130         TALLOC_FREE(result);
2131         return NULL;
2132 }
2133
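/*
 * Lay out one outgoing fragment as
 *
 *   [RPC_HDR] [RPC_HDR_REQ] [up to data_sent_thistime bytes of req_data]
 *   [sign/seal padding] [auth header + auth footer, if authenticated]
 *
 * advance req_data_sent, and set *is_last_frag when this fragment carries
 * RPC_FLG_LAST.
 */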
2134 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2135                                   bool *is_last_frag)
2136 {
2137         RPC_HDR hdr;
2138         RPC_HDR_REQ hdr_req;
2139         uint32_t data_sent_thistime;
2140         uint16_t auth_len;
2141         uint16_t frag_len;
2142         uint8_t flags = 0;
2143         uint32_t ss_padding;
2144         uint32_t data_left;
2145         char pad[8] = { 0, };
2146         NTSTATUS status;
2147
2148         data_left = prs_offset(state->req_data) - state->req_data_sent;
2149
2150         data_sent_thistime = calculate_data_len_tosend(
2151                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2152
2153         if (state->req_data_sent == 0) {
2154                 flags = RPC_FLG_FIRST;
2155         }
2156
2157         if (data_sent_thistime == data_left) {
2158                 flags |= RPC_FLG_LAST;
2159         }
2160
2161         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2162                 return NT_STATUS_NO_MEMORY;
2163         }
2164
2165         /* Create and marshall the header and request header. */
2166         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2167                      auth_len);
2168
2169         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2170                 return NT_STATUS_NO_MEMORY;
2171         }
2172
2173         /* Create the rpc request RPC_HDR_REQ */
2174         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2175                          state->op_num);
2176
2177         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2178                                 &state->outgoing_frag, 0)) {
2179                 return NT_STATUS_NO_MEMORY;
2180         }
2181
2182         /* Copy in the data, plus any ss padding. */
2183         if (!prs_append_some_prs_data(&state->outgoing_frag,
2184                                       state->req_data, state->req_data_sent,
2185                                       data_sent_thistime)) {
2186                 return NT_STATUS_NO_MEMORY;
2187         }
2188
2189         /* Copy the sign/seal padding data. */
2190         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2191                 return NT_STATUS_NO_MEMORY;
2192         }
2193
2194         /* Generate any auth sign/seal and add the auth footer. */
2195         switch (state->cli->auth->auth_type) {
2196         case PIPE_AUTH_TYPE_NONE:
2197                 status = NT_STATUS_OK;
2198                 break;
2199         case PIPE_AUTH_TYPE_NTLMSSP:
2200         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2201                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2202                                                  &state->outgoing_frag);
2203                 break;
2204         case PIPE_AUTH_TYPE_SCHANNEL:
2205                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2206                                                   &state->outgoing_frag);
2207                 break;
2208         default:
2209                 status = NT_STATUS_INVALID_PARAMETER;
2210                 break;
2211         }
2212
2213         state->req_data_sent += data_sent_thistime;
2214         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2215
2216         return status;
2217 }
2218
2219 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
2220 {
2221         struct async_req *req = tevent_req_callback_data(
2222                 subreq, struct async_req);
2223         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2224                 req->private_data, struct rpc_api_pipe_req_state);
2225         struct async_req *subreq2;
2226         NTSTATUS status;
2227         bool is_last_frag;
2228
2229         status = rpc_write_recv(subreq);
2230         TALLOC_FREE(subreq);
2231         if (!NT_STATUS_IS_OK(status)) {
2232                 async_req_nterror(req, status);
2233                 return;
2234         }
2235
2236         status = prepare_next_frag(state, &is_last_frag);
2237         if (!NT_STATUS_IS_OK(status)) {
2238                 async_req_nterror(req, status);
2239                 return;
2240         }
2241
2242         if (is_last_frag) {
2243                 subreq2 = rpc_api_pipe_send(state, state->ev, state->cli,
2244                                            &state->outgoing_frag,
2245                                            RPC_RESPONSE);
2246                 if (async_req_nomem(subreq2, req)) {
2247                         return;
2248                 }
2249                 subreq2->async.fn = rpc_api_pipe_req_done;
2250                 subreq2->async.priv = req;
2251         } else {
2252                 subreq = rpc_write_send(
2253                         state, state->ev,
2254                         state->cli->transport,
2255                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2256                         prs_offset(&state->outgoing_frag));
2257                 if (async_req_nomem(subreq, req)) {
2258                         return;
2259                 }
2260                 tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
2261                                         req);
2262         }
2263 }
2264
2265 static void rpc_api_pipe_req_done(struct async_req *subreq)
2266 {
2267         struct async_req *req = talloc_get_type_abort(
2268                 subreq->async.priv, struct async_req);
2269         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2270                 req->private_data, struct rpc_api_pipe_req_state);
2271         NTSTATUS status;
2272
2273         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2274         TALLOC_FREE(subreq);
2275         if (!NT_STATUS_IS_OK(status)) {
2276                 async_req_nterror(req, status);
2277                 return;
2278         }
2279         async_req_done(req);
2280 }
2281
2282 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2283                                prs_struct *reply_pdu)
2284 {
2285         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2286                 req->private_data, struct rpc_api_pipe_req_state);
2287         NTSTATUS status;
2288
2289         if (async_req_is_nterror(req, &status)) {
2290                 /*
2291                  * We always have to initialize the reply pdu, even if there is
2292                  * none. The rpccli_* caller routines expect this.
2293                  */
2294                 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2295                 return status;
2296         }
2297
2298         *reply_pdu = state->reply_pdu;
2299         reply_pdu->mem_ctx = mem_ctx;
2300
2301         /*
2302          * Prevent state->reply_pdu from being freed in
2303          * rpc_api_pipe_req_state_destructor()
2304          */
2305         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2306
2307         return NT_STATUS_OK;
2308 }
2309
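/*
 * Synchronous wrapper: create a private event context, start
 * rpc_api_pipe_req_send() and run event_loop_once() until the async request
 * has completed, then collect the result with rpc_api_pipe_req_recv().
 */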
2310 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2311                         uint8 op_num,
2312                         prs_struct *in_data,
2313                         prs_struct *out_data)
2314 {
2315         TALLOC_CTX *frame = talloc_stackframe();
2316         struct event_context *ev;
2317         struct async_req *req;
2318         NTSTATUS status = NT_STATUS_NO_MEMORY;
2319
2320         ev = event_context_init(frame);
2321         if (ev == NULL) {
2322                 goto fail;
2323         }
2324
2325         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2326         if (req == NULL) {
2327                 goto fail;
2328         }
2329
2330         while (req->state < ASYNC_REQ_DONE) {
2331                 event_loop_once(ev);
2332         }
2333
2334         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2335  fail:
2336         TALLOC_FREE(frame);
2337         return status;
2338 }
2339
2340 #if 0
2341 /****************************************************************************
2342  Set the handle state.
2343 ****************************************************************************/
2344
2345 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2346                                    const char *pipe_name, uint16 device_state)
2347 {
2348         bool state_set = False;
2349         char param[2];
2350         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2351         char *rparam = NULL;
2352         char *rdata = NULL;
2353         uint32 rparam_len, rdata_len;
2354
2355         if (pipe_name == NULL)
2356                 return False;
2357
2358         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2359                  cli->fnum, pipe_name, device_state));
2360
2361         /* create parameters: device state */
2362         SSVAL(param, 0, device_state);
2363
2364         /* create setup parameters. */
2365         setup[0] = 0x0001; 
2366         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2367
2368         /* send the data on \PIPE\ */
2369         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2370                     setup, 2, 0,                /* setup, length, max */
2371                     param, 2, 0,                /* param, length, max */
2372                     NULL, 0, 1024,              /* data, length, max */
2373                     &rparam, &rparam_len,        /* return param, length */
2374                     &rdata, &rdata_len))         /* return data, length */
2375         {
2376                 DEBUG(5, ("Set Handle state: return OK\n"));
2377                 state_set = True;
2378         }
2379
2380         SAFE_FREE(rparam);
2381         SAFE_FREE(rdata);
2382
2383         return state_set;
2384 }
2385 #endif
2386
2387 /****************************************************************************
2388  Check the rpc bind acknowledge response.
2389 ****************************************************************************/
2390
2391 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2392 {
2393         if ( hdr_ba->addr.len == 0) {
2394                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2395         }
2396
2397         /* check the transfer syntax */
2398         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2399              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2400                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2401                 return False;
2402         }
2403
2404         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2405                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2406                           hdr_ba->res.num_results, hdr_ba->res.reason));
                return False;
2407         }
2408
2409         DEBUG(5,("check_bind_response: accepted!\n"));
2410         return True;
2411 }
2412
2413 /*******************************************************************
2414  Creates a DCE/RPC bind authentication response.
2415  This is the packet that is sent back to the server once we
2416  have received a BIND-ACK, to finish the third leg of
2417  the authentication handshake.
2418  ********************************************************************/
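/*
 * Sketch of the PDU built here (sizes assume the usual 16-byte RPC header
 * and 8-byte auth header): [RPC_HDR (16)] [4 pad bytes] [RPC_HDR_AUTH (8)]
 * [auth blob], i.e. frag_len = 28 + pauth_blob->length, while auth_len is
 * the blob length alone.
 */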
2419
2420 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2421                                 uint32 rpc_call_id,
2422                                 enum pipe_auth_type auth_type,
2423                                 enum pipe_auth_level auth_level,
2424                                 DATA_BLOB *pauth_blob,
2425                                 prs_struct *rpc_out)
2426 {
2427         RPC_HDR hdr;
2428         RPC_HDR_AUTH hdr_auth;
2429         uint32 pad = 0;
2430
2431         /* Create the request RPC_HDR */
2432         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2433                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2434                      pauth_blob->length );
2435
2436         /* Marshall it. */
2437         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2438                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2439                 return NT_STATUS_NO_MEMORY;
2440         }
2441
2442         /*
2443                 I'm puzzled about this - it seems to violate the DCE/RPC rules
2444                 about auth padding - shouldn't this pad to a multiple of 8 ? JRA.
2445         */
2446
2447         /* 4 bytes padding. */
2448         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2449                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2450                 return NT_STATUS_NO_MEMORY;
2451         }
2452
2453         /* Create the request RPC_HDR_AUTH. */
2454         init_rpc_hdr_auth(&hdr_auth,
2455                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2456                         auth_level, 0, 1);
2457
2458         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2459                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2460                 return NT_STATUS_NO_MEMORY;
2461         }
2462
2463         /*
2464          * Append the auth data to the outgoing buffer.
2465          */
2466
2467         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2468                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2469                 return NT_STATUS_NO_MEMORY;
2470         }
2471
2472         return NT_STATUS_OK;
2473 }
2474
2475 /*******************************************************************
2476  Creates a DCE/RPC bind alter context authentication request which
2477  may contain a spnego auth blob.
2478  ********************************************************************/
2479
2480 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2481                                         const RPC_IFACE *abstract,
2482                                         const RPC_IFACE *transfer,
2483                                         enum pipe_auth_level auth_level,
2484                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2485                                         prs_struct *rpc_out)
2486 {
2487         RPC_HDR_AUTH hdr_auth;
2488         prs_struct auth_info;
2489         NTSTATUS ret = NT_STATUS_OK;
2490
2491         ZERO_STRUCT(hdr_auth);
2492         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2493                 return NT_STATUS_NO_MEMORY;
2494
2495         /* We may change the pad length before marshalling. */
2496         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2497
2498         if (pauth_blob->length) {
2499                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2500                         prs_mem_free(&auth_info);
2501                         return NT_STATUS_NO_MEMORY;
2502                 }
2503         }
2504
2505         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2506                                                 rpc_out, 
2507                                                 rpc_call_id,
2508                                                 abstract,
2509                                                 transfer,
2510                                                 &hdr_auth,
2511                                                 &auth_info);
2512         prs_mem_free(&auth_info);
2513         return ret;
2514 }
2515
2516 /****************************************************************************
2517  Do an rpc bind.
2518 ****************************************************************************/
2519
2520 struct rpc_pipe_bind_state {
2521         struct event_context *ev;
2522         struct rpc_pipe_client *cli;
2523         prs_struct rpc_out;
2524         uint32_t rpc_call_id;
2525 };
2526
2527 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2528 {
2529         prs_mem_free(&state->rpc_out);
2530         return 0;
2531 }
2532
2533 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2534 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2535                                            struct rpc_pipe_bind_state *state,
2536                                            struct rpc_hdr_info *phdr,
2537                                            prs_struct *reply_pdu);
2538 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2539 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2540                                                     struct rpc_pipe_bind_state *state,
2541                                                     struct rpc_hdr_info *phdr,
2542                                                     prs_struct *reply_pdu);
2543 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2544
2545 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2546                                      struct event_context *ev,
2547                                      struct rpc_pipe_client *cli,
2548                                      struct cli_pipe_auth_data *auth)
2549 {
2550         struct async_req *result, *subreq;
2551         struct rpc_pipe_bind_state *state;
2552         NTSTATUS status;
2553
2554         if (!async_req_setup(mem_ctx, &result, &state,
2555                              struct rpc_pipe_bind_state)) {
2556                 return NULL;
2557         }
2558
2559         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2560                 rpccli_pipe_txt(debug_ctx(), cli),
2561                 (unsigned int)auth->auth_type,
2562                 (unsigned int)auth->auth_level ));
2563
2564         state->ev = ev;
2565         state->cli = cli;
2566         state->rpc_call_id = get_rpc_call_id();
2567
2568         prs_init_empty(&state->rpc_out, state, MARSHALL);
2569         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2570
2571         cli->auth = talloc_move(cli, &auth);
2572
2573         /* Marshall the outgoing data. */
2574         status = create_rpc_bind_req(cli, &state->rpc_out,
2575                                      state->rpc_call_id,
2576                                      &cli->abstract_syntax,
2577                                      &cli->transfer_syntax,
2578                                      cli->auth->auth_type,
2579                                      cli->auth->auth_level);
2580
2581         if (!NT_STATUS_IS_OK(status)) {
2582                 goto post_status;
2583         }
2584
2585         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2586                                    RPC_BINDACK);
2587         if (subreq == NULL) {
2588                 status = NT_STATUS_NO_MEMORY;
2589                 goto post_status;
2590         }
2591         subreq->async.fn = rpc_pipe_bind_step_one_done;
2592         subreq->async.priv = result;
2593         return result;
2594
2595  post_status:
2596         if (async_post_ntstatus(result, ev, status)) {
2597                 return result;
2598         }
2599         TALLOC_FREE(result);
2600         return NULL;
2601 }
2602
2603 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2604 {
2605         struct async_req *req = talloc_get_type_abort(
2606                 subreq->async.priv, struct async_req);
2607         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2608                 req->private_data, struct rpc_pipe_bind_state);
2609         prs_struct reply_pdu;
2610         struct rpc_hdr_info hdr;
2611         struct rpc_hdr_ba_info hdr_ba;
2612         NTSTATUS status;
2613
2614         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2615         TALLOC_FREE(subreq);
2616         if (!NT_STATUS_IS_OK(status)) {
2617                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2618                           rpccli_pipe_txt(debug_ctx(), state->cli),
2619                           nt_errstr(status)));
2620                 async_req_nterror(req, status);
2621                 return;
2622         }
2623
2624         /* Unmarshall the RPC header */
2625         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2626                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2627                 prs_mem_free(&reply_pdu);
2628                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2629                 return;
2630         }
2631
2632         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2633                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2634                           "RPC_HDR_BA.\n"));
2635                 prs_mem_free(&reply_pdu);
2636                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2637                 return;
2638         }
2639
2640         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2641                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2642                 prs_mem_free(&reply_pdu);
2643                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2644                 return;
2645         }
2646
2647         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2648         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2649
2650         /*
2651          * For authenticated binds we may need to do 3 or 4 leg binds.
2652          */
2653
2654         switch(state->cli->auth->auth_type) {
2655
2656         case PIPE_AUTH_TYPE_NONE:
2657         case PIPE_AUTH_TYPE_SCHANNEL:
2658                 /* Bind complete. */
2659                 prs_mem_free(&reply_pdu);
2660                 async_req_done(req);
2661                 break;
2662
2663         case PIPE_AUTH_TYPE_NTLMSSP:
2664                 /* Need to send AUTH3 packet - no reply. */
2665                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2666                                                     &reply_pdu);
2667                 prs_mem_free(&reply_pdu);
2668                 if (!NT_STATUS_IS_OK(status)) {
2669                         async_req_nterror(req, status);
2670                 }
2671                 break;
2672
2673         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2674                 /* Need to send alter context request and reply. */
2675                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2676                                                              &reply_pdu);
2677                 prs_mem_free(&reply_pdu);
2678                 if (!NT_STATUS_IS_OK(status)) {
2679                         async_req_nterror(req, status);
2680                 }
2681                 break;
2682
2683         case PIPE_AUTH_TYPE_KRB5:
2684                 /* Kerberos binds are not handled in this async path yet -
2685                  * fall through to the error case. */
2686         default:
2687                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2688                          (unsigned int)state->cli->auth->auth_type));
2689                 prs_mem_free(&reply_pdu);
2690                 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2691         }
2692 }
2693
2694 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2695                                            struct rpc_pipe_bind_state *state,
2696                                            struct rpc_hdr_info *phdr,
2697                                            prs_struct *reply_pdu)
2698 {
2699         DATA_BLOB server_response = data_blob_null;
2700         DATA_BLOB client_reply = data_blob_null;
2701         struct rpc_hdr_auth_info hdr_auth;
2702         struct tevent_req *subreq;
2703         NTSTATUS status;
2704
2705         if ((phdr->auth_len == 0)
2706             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2707                 return NT_STATUS_INVALID_PARAMETER;
2708         }
2709
2710         if (!prs_set_offset(
2711                     reply_pdu,
2712                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2713                 return NT_STATUS_INVALID_PARAMETER;
2714         }
2715
2716         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2717                 return NT_STATUS_INVALID_PARAMETER;
2718         }
2719
2720         /* TODO - check auth_type/auth_level match. */
2721
2722         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2723         prs_copy_data_out((char *)server_response.data, reply_pdu,
2724                           phdr->auth_len);
2725
2726         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2727                                 server_response, &client_reply);
2728
2729         if (!NT_STATUS_IS_OK(status)) {
2730                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2731                           "blob failed: %s.\n", nt_errstr(status)));
2732                 return status;
2733         }
2734
2735         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2736
2737         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2738                                        state->cli->auth->auth_type,
2739                                        state->cli->auth->auth_level,
2740                                        &client_reply, &state->rpc_out);
2741         data_blob_free(&client_reply);
2742
2743         if (!NT_STATUS_IS_OK(status)) {
2744                 return status;
2745         }
2746
2747         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2748                                 (uint8_t *)prs_data_p(&state->rpc_out),
2749                                 prs_offset(&state->rpc_out));
2750         if (subreq == NULL) {
2751                 return NT_STATUS_NO_MEMORY;
2752         }
2753         tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2754         return NT_STATUS_OK;
2755 }
2756
2757 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2758 {
2759         struct async_req *req = tevent_req_callback_data(
2760                 subreq, struct async_req);
2761         NTSTATUS status;
2762
2763         status = rpc_write_recv(subreq);
2764         TALLOC_FREE(subreq);
2765         if (!NT_STATUS_IS_OK(status)) {
2766                 async_req_nterror(req, status);
2767                 return;
2768         }
2769         async_req_done(req);
2770 }
2771
2772 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2773                                                     struct rpc_pipe_bind_state *state,
2774                                                     struct rpc_hdr_info *phdr,
2775                                                     prs_struct *reply_pdu)
2776 {
2777         DATA_BLOB server_spnego_response = data_blob_null;
2778         DATA_BLOB server_ntlm_response = data_blob_null;
2779         DATA_BLOB client_reply = data_blob_null;
2780         DATA_BLOB tmp_blob = data_blob_null;
2781         RPC_HDR_AUTH hdr_auth;
2782         struct async_req *subreq;
2783         NTSTATUS status;
2784
2785         if ((phdr->auth_len == 0)
2786             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2787                 return NT_STATUS_INVALID_PARAMETER;
2788         }
2789
2790         /* Process the returned NTLMSSP blob first. */
2791         if (!prs_set_offset(
2792                     reply_pdu,
2793                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2794                 return NT_STATUS_INVALID_PARAMETER;
2795         }
2796
2797         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2798                 return NT_STATUS_INVALID_PARAMETER;
2799         }
2800
2801         server_spnego_response = data_blob(NULL, phdr->auth_len);
2802         prs_copy_data_out((char *)server_spnego_response.data,
2803                           reply_pdu, phdr->auth_len);
2804
2805         /*
2806          * The server might give us back two challenges - tmp_blob is for the
2807          * second.
2808          */
2809         if (!spnego_parse_challenge(server_spnego_response,
2810                                     &server_ntlm_response, &tmp_blob)) {
2811                 data_blob_free(&server_spnego_response);
2812                 data_blob_free(&server_ntlm_response);
2813                 data_blob_free(&tmp_blob);
2814                 return NT_STATUS_INVALID_PARAMETER;
2815         }
2816
2817         /* We're finished with the server spnego response and the tmp_blob. */
2818         data_blob_free(&server_spnego_response);
2819         data_blob_free(&tmp_blob);
2820
2821         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2822                                 server_ntlm_response, &client_reply);
2823
2824         /* Finished with the server_ntlm response */
2825         data_blob_free(&server_ntlm_response);
2826
2827         if (!NT_STATUS_IS_OK(status)) {
2828                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2829                           "using server blob failed: %s.\n", nt_errstr(status)));
2830                 data_blob_free(&client_reply);
2831                 return status;
2832         }
2833
2834         /* SPNEGO wrap the client reply. */
2835         tmp_blob = spnego_gen_auth(client_reply);
2836         data_blob_free(&client_reply);
2837         client_reply = tmp_blob;
2838         tmp_blob = data_blob_null;
2839
2840         /* Now prepare the alter context pdu. */
2841         prs_init_empty(&state->rpc_out, state, MARSHALL);
2842
2843         status = create_rpc_alter_context(state->rpc_call_id,
2844                                           &state->cli->abstract_syntax,
2845                                           &state->cli->transfer_syntax,
2846                                           state->cli->auth->auth_level,
2847                                           &client_reply,
2848                                           &state->rpc_out);
2849         data_blob_free(&client_reply);
2850
2851         if (!NT_STATUS_IS_OK(status)) {
2852                 return status;
2853         }
2854
2855         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2856                                    &state->rpc_out, RPC_ALTCONTRESP);
2857         if (subreq == NULL) {
2858                 return NT_STATUS_NO_MEMORY;
2859         }
2860         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2861         subreq->async.priv = req;
2862         return NT_STATUS_OK;
2863 }
2864
2865 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2866 {
2867         struct async_req *req = talloc_get_type_abort(
2868                 subreq->async.priv, struct async_req);
2869         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2870                 req->private_data, struct rpc_pipe_bind_state);
2871         DATA_BLOB server_spnego_response = data_blob_null;
2872         DATA_BLOB tmp_blob = data_blob_null;
2873         prs_struct reply_pdu;
2874         struct rpc_hdr_info hdr;
2875         struct rpc_hdr_auth_info hdr_auth;
2876         NTSTATUS status;
2877
2878         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2879         TALLOC_FREE(subreq);
2880         if (!NT_STATUS_IS_OK(status)) {
2881                 async_req_nterror(req, status);
2882                 return;
2883         }
2884
2885         /* Get the auth blob from the reply. */
2886         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2887                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2888                           "unmarshall RPC_HDR.\n"));
2889                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2890                 return;
2891         }
2892
2893         if (!prs_set_offset(
2894                     &reply_pdu,
2895                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2896                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2897                 return;
2898         }
2899
2900         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2901                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2902                 return;
2903         }
2904
2905         server_spnego_response = data_blob(NULL, hdr.auth_len);
2906         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2907                           hdr.auth_len);
2908
2909         /* Check we got a valid auth response. */
2910         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2911                                         OID_NTLMSSP, &tmp_blob)) {
2912                 data_blob_free(&server_spnego_response);
2913                 data_blob_free(&tmp_blob);
2914                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2915                 return;
2916         }
2917
2918         data_blob_free(&server_spnego_response);
2919         data_blob_free(&tmp_blob);
2920
2921         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2922                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2923         async_req_done(req);
2924 }
2925
2926 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2927 {
2928         return async_req_simple_recv_ntstatus(req);
2929 }
2930
2931 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2932                        struct cli_pipe_auth_data *auth)
2933 {
2934         TALLOC_CTX *frame = talloc_stackframe();
2935         struct event_context *ev;
2936         struct async_req *req;
2937         NTSTATUS status = NT_STATUS_NO_MEMORY;
2938
2939         ev = event_context_init(frame);
2940         if (ev == NULL) {
2941                 goto fail;
2942         }
2943
2944         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2945         if (req == NULL) {
2946                 goto fail;
2947         }
2948
2949         while (req->state < ASYNC_REQ_DONE) {
2950                 event_loop_once(ev);
2951         }
2952
2953         status = rpc_pipe_bind_recv(req);
2954  fail:
2955         TALLOC_FREE(frame);
2956         return status;
2957 }
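
/*
 * Illustrative sketch (not compiled in): rpc_pipe_bind_send/_recv can also
 * be driven from other async code, using the same async_req callback
 * convention as the callers above.  The example_* names are made up for
 * this sketch; request ownership and cleanup are left to the caller.
 */
#if 0
static void example_bind_done(struct async_req *subreq);

static struct async_req *example_bind_start(TALLOC_CTX *mem_ctx,
                                            struct event_context *ev,
                                            struct rpc_pipe_client *cli,
                                            struct cli_pipe_auth_data *auth)
{
        struct async_req *subreq;

        subreq = rpc_pipe_bind_send(mem_ctx, ev, cli, auth);
        if (subreq == NULL) {
                return NULL;
        }
        subreq->async.fn = example_bind_done;
        subreq->async.priv = NULL;      /* would normally carry caller state */
        return subreq;
}

static void example_bind_done(struct async_req *subreq)
{
        NTSTATUS status = rpc_pipe_bind_recv(subreq);

        DEBUG(10, ("example bind completed: %s\n", nt_errstr(status)));
}
#endif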
2958
2959 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2960                                 unsigned int timeout)
2961 {
2962         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2963
2964         if (cli == NULL) {
2965                 return 0;
2966         }
2967         return cli_set_timeout(cli, timeout);
2968 }
2969
2970 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2971 {
2972         struct cli_state *cli;
2973
2974         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2975             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2976                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2977                 return true;
2978         }
2979
2980         cli = rpc_pipe_np_smb_conn(rpc_cli);
2981         if (cli == NULL) {
2982                 return false;
2983         }
2984         E_md4hash(cli->password ? cli->password : "", nt_hash);
2985         return true;
2986 }
2987
2988 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2989                                struct cli_pipe_auth_data **presult)
2990 {
2991         struct cli_pipe_auth_data *result;
2992
2993         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2994         if (result == NULL) {
2995                 return NT_STATUS_NO_MEMORY;
2996         }
2997
2998         result->auth_type = PIPE_AUTH_TYPE_NONE;
2999         result->auth_level = PIPE_AUTH_LEVEL_NONE;
3000
3001         result->user_name = talloc_strdup(result, "");
3002         result->domain = talloc_strdup(result, "");
3003         if ((result->user_name == NULL) || (result->domain == NULL)) {
3004                 TALLOC_FREE(result);
3005                 return NT_STATUS_NO_MEMORY;
3006         }
3007
3008         *presult = result;
3009         return NT_STATUS_OK;
3010 }
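
/*
 * Minimal usage sketch (not compiled in): anonymous bind credentials are
 * typically allocated on the pipe itself and handed straight to
 * rpc_pipe_bind(), as the open helpers further down in this file do.
 */
#if 0
static NTSTATUS example_anon_bind(struct rpc_pipe_client *p)
{
        struct cli_pipe_auth_data *auth;
        NTSTATUS status;

        status = rpccli_anon_bind_data(p, &auth);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }
        return rpc_pipe_bind(p, auth);
}
#endif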
3011
3012 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3013 {
3014         ntlmssp_end(&auth->a_u.ntlmssp_state);
3015         return 0;
3016 }
3017
3018 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3019                                   enum pipe_auth_type auth_type,
3020                                   enum pipe_auth_level auth_level,
3021                                   const char *domain,
3022                                   const char *username,
3023                                   const char *password,
3024                                   struct cli_pipe_auth_data **presult)
3025 {
3026         struct cli_pipe_auth_data *result;
3027         NTSTATUS status;
3028
3029         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3030         if (result == NULL) {
3031                 return NT_STATUS_NO_MEMORY;
3032         }
3033
3034         result->auth_type = auth_type;
3035         result->auth_level = auth_level;
3036
3037         result->user_name = talloc_strdup(result, username);
3038         result->domain = talloc_strdup(result, domain);
3039         if ((result->user_name == NULL) || (result->domain == NULL)) {
3040                 status = NT_STATUS_NO_MEMORY;
3041                 goto fail;
3042         }
3043
3044         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3045         if (!NT_STATUS_IS_OK(status)) {
3046                 goto fail;
3047         }
3048
3049         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3050
3051         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3052         if (!NT_STATUS_IS_OK(status)) {
3053                 goto fail;
3054         }
3055
3056         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3057         if (!NT_STATUS_IS_OK(status)) {
3058                 goto fail;
3059         }
3060
3061         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3062         if (!NT_STATUS_IS_OK(status)) {
3063                 goto fail;
3064         }
3065
3066         /*
3067          * Turn off sign+seal to allow selected auth level to turn it back on.
3068          */
3069         result->a_u.ntlmssp_state->neg_flags &=
3070                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3071
3072         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3073                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3074         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3075                 result->a_u.ntlmssp_state->neg_flags
3076                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3077         }
3078
3079         *presult = result;
3080         return NT_STATUS_OK;
3081
3082  fail:
3083         TALLOC_FREE(result);
3084         return status;
3085 }
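
/*
 * Usage sketch (not compiled in): requesting PIPE_AUTH_LEVEL_PRIVACY makes
 * the negotiated NTLMSSP context seal (and sign) the RPC traffic.  The
 * domain, user and password values below are placeholders.
 */
#if 0
static NTSTATUS example_ntlmssp_bind(struct rpc_pipe_client *p)
{
        struct cli_pipe_auth_data *auth;
        NTSTATUS status;

        status = rpccli_ntlmssp_bind_data(p, PIPE_AUTH_TYPE_NTLMSSP,
                                          PIPE_AUTH_LEVEL_PRIVACY,
                                          "EXAMPLEDOM", "exampleuser",
                                          "examplepass", &auth);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }
        return rpc_pipe_bind(p, auth);
}
#endif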
3086
3087 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3088                                    enum pipe_auth_level auth_level,
3089                                    const uint8_t sess_key[16],
3090                                    struct cli_pipe_auth_data **presult)
3091 {
3092         struct cli_pipe_auth_data *result;
3093
3094         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3095         if (result == NULL) {
3096                 return NT_STATUS_NO_MEMORY;
3097         }
3098
3099         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3100         result->auth_level = auth_level;
3101
3102         result->user_name = talloc_strdup(result, "");
3103         result->domain = talloc_strdup(result, domain);
3104         if ((result->user_name == NULL) || (result->domain == NULL)) {
3105                 goto fail;
3106         }
3107
3108         result->a_u.schannel_auth = talloc(result,
3109                                            struct schannel_auth_struct);
3110         if (result->a_u.schannel_auth == NULL) {
3111                 goto fail;
3112         }
3113
3114         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3115                sizeof(result->a_u.schannel_auth->sess_key));
3116         result->a_u.schannel_auth->seq_num = 0;
3117
3118         *presult = result;
3119         return NT_STATUS_OK;
3120
3121  fail:
3122         TALLOC_FREE(result);
3123         return NT_STATUS_NO_MEMORY;
3124 }
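
/*
 * Usage sketch (not compiled in): the schannel session key normally comes
 * from a struct dcinfo filled by a prior netlogon credential chain, as in
 * cli_rpc_pipe_open_schannel_with_key() further down.  The domain name is
 * a placeholder.
 */
#if 0
static NTSTATUS example_schannel_bind(struct rpc_pipe_client *p,
                                      const struct dcinfo *pdc)
{
        struct cli_pipe_auth_data *auth;
        NTSTATUS status;

        status = rpccli_schannel_bind_data(p, "EXAMPLEDOM",
                                           PIPE_AUTH_LEVEL_PRIVACY,
                                           pdc->sess_key, &auth);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }
        return rpc_pipe_bind(p, auth);
}
#endif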
3125
3126 #ifdef HAVE_KRB5
3127 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3128 {
3129         data_blob_free(&auth->session_key);
3130         return 0;
3131 }
3132 #endif
3133
3134 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3135                                    enum pipe_auth_level auth_level,
3136                                    const char *service_princ,
3137                                    const char *username,
3138                                    const char *password,
3139                                    struct cli_pipe_auth_data **presult)
3140 {
3141 #ifdef HAVE_KRB5
3142         struct cli_pipe_auth_data *result;
3143
3144         if ((username != NULL) && (password != NULL)) {
3145                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3146                 if (ret != 0) {
3147                         return NT_STATUS_ACCESS_DENIED;
3148                 }
3149         }
3150
3151         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3152         if (result == NULL) {
3153                 return NT_STATUS_NO_MEMORY;
3154         }
3155
3156         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3157         result->auth_level = auth_level;
3158
3159         /*
3160          * Username / domain need fixing!
3161          */
3162         result->user_name = talloc_strdup(result, "");
3163         result->domain = talloc_strdup(result, "");
3164         if ((result->user_name == NULL) || (result->domain == NULL)) {
3165                 goto fail;
3166         }
3167
3168         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3169                 result, struct kerberos_auth_struct);
3170         if (result->a_u.kerberos_auth == NULL) {
3171                 goto fail;
3172         }
3173         talloc_set_destructor(result->a_u.kerberos_auth,
3174                               cli_auth_kerberos_data_destructor);
3175
3176         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3177                 result, service_princ);
3178         if (result->a_u.kerberos_auth->service_principal == NULL) {
3179                 goto fail;
3180         }
3181
3182         *presult = result;
3183         return NT_STATUS_OK;
3184
3185  fail:
3186         TALLOC_FREE(result);
3187         return NT_STATUS_NO_MEMORY;
3188 #else
3189         return NT_STATUS_NOT_SUPPORTED;
3190 #endif
3191 }
3192
3193 /**
3194  * Create an rpc pipe client struct, connecting to a tcp port.
3195  */
3196 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3197                                        uint16_t port,
3198                                        const struct ndr_syntax_id *abstract_syntax,
3199                                        struct rpc_pipe_client **presult)
3200 {
3201         struct rpc_pipe_client *result;
3202         struct sockaddr_storage addr;
3203         NTSTATUS status;
3204         int fd;
3205
3206         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3207         if (result == NULL) {
3208                 return NT_STATUS_NO_MEMORY;
3209         }
3210
3211         result->abstract_syntax = *abstract_syntax;
3212         result->transfer_syntax = ndr_transfer_syntax;
3213         result->dispatch = cli_do_rpc_ndr;
3214
3215         result->desthost = talloc_strdup(result, host);
3216         result->srv_name_slash = talloc_asprintf_strupper_m(
3217                 result, "\\\\%s", result->desthost);
3218         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3219                 status = NT_STATUS_NO_MEMORY;
3220                 goto fail;
3221         }
3222
3223         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3224         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3225
3226         if (!resolve_name(host, &addr, 0)) {
3227                 status = NT_STATUS_NOT_FOUND;
3228                 goto fail;
3229         }
3230
3231         status = open_socket_out(&addr, port, 60, &fd);
3232         if (!NT_STATUS_IS_OK(status)) {
3233                 goto fail;
3234         }
3235         set_socket_options(fd, lp_socket_options());
3236
3237         status = rpc_transport_sock_init(result, fd, &result->transport);
3238         if (!NT_STATUS_IS_OK(status)) {
3239                 close(fd);
3240                 goto fail;
3241         }
3242
3243         *presult = result;
3244         return NT_STATUS_OK;
3245
3246  fail:
3247         TALLOC_FREE(result);
3248         return status;
3249 }
3250
3251 /**
3252  * Determine the tcp port on which a dcerpc interface is listening
3253  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3254  * target host.
3255  */
3256 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3257                                       const struct ndr_syntax_id *abstract_syntax,
3258                                       uint16_t *pport)
3259 {
3260         NTSTATUS status;
3261         struct rpc_pipe_client *epm_pipe = NULL;
3262         struct cli_pipe_auth_data *auth = NULL;
3263         struct dcerpc_binding *map_binding = NULL;
3264         struct dcerpc_binding *res_binding = NULL;
3265         struct epm_twr_t *map_tower = NULL;
3266         struct epm_twr_t *res_towers = NULL;
3267         struct policy_handle *entry_handle = NULL;
3268         uint32_t num_towers = 0;
3269         uint32_t max_towers = 1;
3270         struct epm_twr_p_t towers;
3271         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3272
3273         if (pport == NULL) {
3274                 status = NT_STATUS_INVALID_PARAMETER;
3275                 goto done;
3276         }
3277
3278         /* open the connection to the endpoint mapper */
3279         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3280                                         &ndr_table_epmapper.syntax_id,
3281                                         &epm_pipe);
3282
3283         if (!NT_STATUS_IS_OK(status)) {
3284                 goto done;
3285         }
3286
3287         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3288         if (!NT_STATUS_IS_OK(status)) {
3289                 goto done;
3290         }
3291
3292         status = rpc_pipe_bind(epm_pipe, auth);
3293         if (!NT_STATUS_IS_OK(status)) {
3294                 goto done;
3295         }
3296
3297         /* create tower for asking the epmapper */
3298
3299         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3300         if (map_binding == NULL) {
3301                 status = NT_STATUS_NO_MEMORY;
3302                 goto done;
3303         }
3304
3305         map_binding->transport = NCACN_IP_TCP;
3306         map_binding->object = *abstract_syntax;
3307         map_binding->host = host; /* needed? */
3308         map_binding->endpoint = "0"; /* correct? needed? */
3309
3310         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3311         if (map_tower == NULL) {
3312                 status = NT_STATUS_NO_MEMORY;
3313                 goto done;
3314         }
3315
3316         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3317                                             &(map_tower->tower));
3318         if (!NT_STATUS_IS_OK(status)) {
3319                 goto done;
3320         }
3321
3322         /* allocate further parameters for the epm_Map call */
3323
3324         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3325         if (res_towers == NULL) {
3326                 status = NT_STATUS_NO_MEMORY;
3327                 goto done;
3328         }
3329         towers.twr = res_towers;
3330
3331         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3332         if (entry_handle == NULL) {
3333                 status = NT_STATUS_NO_MEMORY;
3334                 goto done;
3335         }
3336
3337         /* ask the endpoint mapper for the port */
3338
3339         status = rpccli_epm_Map(epm_pipe,
3340                                 tmp_ctx,
3341                                 CONST_DISCARD(struct GUID *,
3342                                               &(abstract_syntax->uuid)),
3343                                 map_tower,
3344                                 entry_handle,
3345                                 max_towers,
3346                                 &num_towers,
3347                                 &towers);
3348
3349         if (!NT_STATUS_IS_OK(status)) {
3350                 goto done;
3351         }
3352
3353         if (num_towers != 1) {
3354                 status = NT_STATUS_UNSUCCESSFUL;
3355                 goto done;
3356         }
3357
3358         /* extract the port from the answer */
3359
3360         status = dcerpc_binding_from_tower(tmp_ctx,
3361                                            &(towers.twr->tower),
3362                                            &res_binding);
3363         if (!NT_STATUS_IS_OK(status)) {
3364                 goto done;
3365         }
3366
3367         /* are further checks here necessary? */
3368         if (res_binding->transport != NCACN_IP_TCP) {
3369                 status = NT_STATUS_UNSUCCESSFUL;
3370                 goto done;
3371         }
3372
3373         *pport = (uint16_t)atoi(res_binding->endpoint);
3374
3375 done:
3376         TALLOC_FREE(tmp_ctx);
3377         return status;
3378 }
3379
3380 /**
3381  * Create an rpc pipe client struct, connecting to a host via tcp.
3382  * The port is determined by asking the endpoint mapper on the given
3383  * host.
3384  */
3385 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3386                            const struct ndr_syntax_id *abstract_syntax,
3387                            struct rpc_pipe_client **presult)
3388 {
3389         NTSTATUS status;
3390         uint16_t port = 0;
3391
3392         *presult = NULL;
3393
3394         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3395         if (!NT_STATUS_IS_OK(status)) {
3396                 goto done;
3397         }
3398
3399         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3400                                         abstract_syntax, presult);
3401
3402 done:
3403         return status;
3404 }
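
/*
 * Usage sketch (not compiled in): open an ncacn_ip_tcp connection for an
 * interface, letting the endpoint mapper on the target host pick the port,
 * then bind anonymously.  The host name is a placeholder.
 */
#if 0
static NTSTATUS example_open_tcp(TALLOC_CTX *mem_ctx,
                                 struct rpc_pipe_client **presult)
{
        struct rpc_pipe_client *p;
        struct cli_pipe_auth_data *auth;
        NTSTATUS status;

        status = rpc_pipe_open_tcp(mem_ctx, "dc.example.com",
                                   &ndr_table_drsuapi.syntax_id, &p);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }
        status = rpccli_anon_bind_data(p, &auth);
        if (NT_STATUS_IS_OK(status)) {
                status = rpc_pipe_bind(p, auth);
        }
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(p);
                return status;
        }
        *presult = p;
        return NT_STATUS_OK;
}
#endif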
3405
3406 /********************************************************************
3407  Create an rpc pipe client struct, connecting to a unix domain socket
3408  ********************************************************************/
3409 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3410                                const struct ndr_syntax_id *abstract_syntax,
3411                                struct rpc_pipe_client **presult)
3412 {
3413         struct rpc_pipe_client *result;
3414         struct sockaddr_un addr;
3415         NTSTATUS status;
3416         int fd;
3417
3418         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3419         if (result == NULL) {
3420                 return NT_STATUS_NO_MEMORY;
3421         }
3422
3423         result->abstract_syntax = *abstract_syntax;
3424         result->transfer_syntax = ndr_transfer_syntax;
3425         result->dispatch = cli_do_rpc_ndr;
3426
3427         result->desthost = get_myname(result);
3428         result->srv_name_slash = talloc_asprintf_strupper_m(
3429                 result, "\\\\%s", result->desthost);
3430         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3431                 status = NT_STATUS_NO_MEMORY;
3432                 goto fail;
3433         }
3434
3435         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3436         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3437
3438         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3439         if (fd == -1) {
3440                 status = map_nt_error_from_unix(errno);
3441                 goto fail;
3442         }
3443
3444         ZERO_STRUCT(addr);
3445         addr.sun_family = AF_UNIX;
3446         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3447
3448         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3449                 status = map_nt_error_from_unix(errno);
3450                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path, strerror(errno)));
3451                 close(fd);
3452                 goto fail;
3453         }
3454
3455         status = rpc_transport_sock_init(result, fd, &result->transport);
3456         if (!NT_STATUS_IS_OK(status)) {
3457                 close(fd);
3458                 goto fail;
3459         }
3460
3461         *presult = result;
3462         return NT_STATUS_OK;
3463
3464  fail:
3465         TALLOC_FREE(result);
3466         return status;
3467 }
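
/*
 * Usage sketch (not compiled in): connect to a local ncalrpc endpoint.
 * The socket path used here is a placeholder, not a real Samba default.
 */
#if 0
static NTSTATUS example_open_ncalrpc(TALLOC_CTX *mem_ctx,
                                     struct rpc_pipe_client **presult)
{
        return rpc_pipe_open_ncalrpc(mem_ctx, "/tmp/example-ncalrpc",
                                     &ndr_table_epmapper.syntax_id, presult);
}
#endif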
3468
3469 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3470 {
3471         struct cli_state *cli;
3472
3473         cli = rpc_pipe_np_smb_conn(p);
3474         if (cli != NULL) {
3475                 DLIST_REMOVE(cli->pipe_list, p);
3476         }
3477         return 0;
3478 }
3479
3480 /****************************************************************************
3481  Open a named pipe over SMB to a remote server.
3482  *
3483  * CAVEAT CALLER OF THIS FUNCTION:
3484  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3485  *    so be sure that this function is called AFTER any structure (vs pointer)
3486  *    assignment of the cli.  In particular, libsmbclient does structure
3487  *    assignments of cli, which invalidates the data in the returned
3488  *    rpc_pipe_client if this function is called before the structure assignment
3489  *    of cli.
3490  * 
3491  ****************************************************************************/
3492
3493 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3494                                  const struct ndr_syntax_id *abstract_syntax,
3495                                  struct rpc_pipe_client **presult)
3496 {
3497         struct rpc_pipe_client *result;
3498         NTSTATUS status;
3499
3500         /* sanity check to protect against crashes */
3501
3502         if ( !cli ) {
3503                 return NT_STATUS_INVALID_HANDLE;
3504         }
3505
3506         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3507         if (result == NULL) {
3508                 return NT_STATUS_NO_MEMORY;
3509         }
3510
3511         result->abstract_syntax = *abstract_syntax;
3512         result->transfer_syntax = ndr_transfer_syntax;
3513         result->dispatch = cli_do_rpc_ndr;
3514         result->desthost = talloc_strdup(result, cli->desthost);
3515         result->srv_name_slash = talloc_asprintf_strupper_m(
3516                 result, "\\\\%s", result->desthost);
3517
3518         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3519         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3520
3521         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3522                 TALLOC_FREE(result);
3523                 return NT_STATUS_NO_MEMORY;
3524         }
3525
3526         status = rpc_transport_np_init(result, cli, abstract_syntax,
3527                                        &result->transport);
3528         if (!NT_STATUS_IS_OK(status)) {
3529                 TALLOC_FREE(result);
3530                 return status;
3531         }
3532
3533         DLIST_ADD(cli->pipe_list, result);
3534         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3535
3536         *presult = result;
3537         return NT_STATUS_OK;
3538 }
3539
3540 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3541                              struct rpc_cli_smbd_conn *conn,
3542                              const struct ndr_syntax_id *syntax,
3543                              struct rpc_pipe_client **presult)
3544 {
3545         struct rpc_pipe_client *result;
3546         struct cli_pipe_auth_data *auth;
3547         NTSTATUS status;
3548
3549         result = talloc(mem_ctx, struct rpc_pipe_client);
3550         if (result == NULL) {
3551                 return NT_STATUS_NO_MEMORY;
3552         }
3553         result->abstract_syntax = *syntax;
3554         result->transfer_syntax = ndr_transfer_syntax;
3555         result->dispatch = cli_do_rpc_ndr;
3556         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3557         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3558
3559         result->desthost = talloc_strdup(result, global_myname());
3560         result->srv_name_slash = talloc_asprintf_strupper_m(
3561                 result, "\\\\%s", global_myname());
3562         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3563                 TALLOC_FREE(result);
3564                 return NT_STATUS_NO_MEMORY;
3565         }
3566
3567         status = rpc_transport_smbd_init(result, conn, syntax,
3568                                          &result->transport);
3569         if (!NT_STATUS_IS_OK(status)) {
3570                 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3571                           nt_errstr(status)));
3572                 TALLOC_FREE(result);
3573                 return status;
3574         }
3575
3576         status = rpccli_anon_bind_data(result, &auth);
3577         if (!NT_STATUS_IS_OK(status)) {
3578                 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3579                           nt_errstr(status)));
3580                 TALLOC_FREE(result);
3581                 return status;
3582         }
3583
3584         status = rpc_pipe_bind(result, auth);
3585         if (!NT_STATUS_IS_OK(status)) {
3586                 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3587                 TALLOC_FREE(result);
3588                 return status;
3589         }
3590
3591         *presult = result;
3592         return NT_STATUS_OK;
3593 }
3594
3595 /****************************************************************************
3596  Open a pipe to a remote server.
3597  ****************************************************************************/
3598
3599 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3600                                   const struct ndr_syntax_id *interface,
3601                                   struct rpc_pipe_client **presult)
3602 {
3603         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3604                 /*
3605                  * We should have a better way to figure out this drsuapi
3606                  * speciality...
3607                  */
3608                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3609                                          presult);
3610         }
3611
3612         return rpc_pipe_open_np(cli, interface, presult);
3613 }
3614
3615 /****************************************************************************
3616  Open a named pipe to an SMB server and bind anonymously.
3617  ****************************************************************************/
3618
3619 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3620                                   const struct ndr_syntax_id *interface,
3621                                   struct rpc_pipe_client **presult)
3622 {
3623         struct rpc_pipe_client *result;
3624         struct cli_pipe_auth_data *auth;
3625         NTSTATUS status;
3626
3627         status = cli_rpc_pipe_open(cli, interface, &result);
3628         if (!NT_STATUS_IS_OK(status)) {
3629                 return status;
3630         }
3631
3632         status = rpccli_anon_bind_data(result, &auth);
3633         if (!NT_STATUS_IS_OK(status)) {
3634                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3635                           nt_errstr(status)));
3636                 TALLOC_FREE(result);
3637                 return status;
3638         }
3639
3640         /*
3641          * This is a bit of an abstraction violation due to the fact that an
3642          * anonymous bind on an authenticated SMB inherits the user/domain
3643          * from the enclosing SMB creds
3644          */
3645
3646         TALLOC_FREE(auth->user_name);
3647         TALLOC_FREE(auth->domain);
3648
3649         auth->user_name = talloc_strdup(auth, cli->user_name);
3650         auth->domain = talloc_strdup(auth, cli->domain);
3651         auth->user_session_key = data_blob_talloc(auth,
3652                 cli->user_session_key.data,
3653                 cli->user_session_key.length);
3654
3655         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3656                 TALLOC_FREE(result);
3657                 return NT_STATUS_NO_MEMORY;
3658         }
3659
3660         status = rpc_pipe_bind(result, auth);
3661         if (!NT_STATUS_IS_OK(status)) {
3662                 int lvl = 0;
3663                 if (ndr_syntax_id_equal(interface,
3664                                         &ndr_table_dssetup.syntax_id)) {
3665                         /* non-AD domains just don't have this pipe, so
3666                          * avoid a level 0 debug message in that case - gd */
3667                         lvl = 3;
3668                 }
3669                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3670                             "%s failed with error %s\n",
3671                             get_pipe_name_from_iface(interface),
3672                             nt_errstr(status) ));
3673                 TALLOC_FREE(result);
3674                 return status;
3675         }
3676
3677         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3678                   "%s and bound anonymously.\n",
3679                   get_pipe_name_from_iface(interface), cli->desthost));
3680
3681         *presult = result;
3682         return NT_STATUS_OK;
3683 }
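
/*
 * Usage sketch (not compiled in): open the dssetup pipe on an existing SMB
 * connection and bind anonymously; on non-AD domains the pipe simply does
 * not exist, as noted above.
 */
#if 0
static NTSTATUS example_open_dssetup(struct cli_state *cli,
                                     struct rpc_pipe_client **presult)
{
        return cli_rpc_pipe_open_noauth(cli, &ndr_table_dssetup.syntax_id,
                                        presult);
}
#endif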
3684
3685 /****************************************************************************
3686  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3687  ****************************************************************************/
3688
3689 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3690                                                    const struct ndr_syntax_id *interface,
3691                                                    enum pipe_auth_type auth_type,
3692                                                    enum pipe_auth_level auth_level,
3693                                                    const char *domain,
3694                                                    const char *username,
3695                                                    const char *password,
3696                                                    struct rpc_pipe_client **presult)
3697 {
3698         struct rpc_pipe_client *result;
3699         struct cli_pipe_auth_data *auth;
3700         NTSTATUS status;
3701
3702         status = cli_rpc_pipe_open(cli, interface, &result);
3703         if (!NT_STATUS_IS_OK(status)) {
3704                 return status;
3705         }
3706
3707         status = rpccli_ntlmssp_bind_data(
3708                 result, auth_type, auth_level, domain, username,
3709                 password, &auth);
3710         if (!NT_STATUS_IS_OK(status)) {
3711                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3712                           nt_errstr(status)));
3713                 goto err;
3714         }
3715
3716         status = rpc_pipe_bind(result, auth);
3717         if (!NT_STATUS_IS_OK(status)) {
3718                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3719                         nt_errstr(status) ));
3720                 goto err;
3721         }
3722
3723         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3724                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3725                   get_pipe_name_from_iface(interface), cli->desthost, domain,
3726                   username ));
3727
3728         *presult = result;
3729         return NT_STATUS_OK;
3730
3731   err:
3732
3733         TALLOC_FREE(result);
3734         return status;
3735 }
3736
3737 /****************************************************************************
3738  External interface.
3739  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3740  ****************************************************************************/
3741
3742 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3743                                    const struct ndr_syntax_id *interface,
3744                                    enum pipe_auth_level auth_level,
3745                                    const char *domain,
3746                                    const char *username,
3747                                    const char *password,
3748                                    struct rpc_pipe_client **presult)
3749 {
3750         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3751                                                 interface,
3752                                                 PIPE_AUTH_TYPE_NTLMSSP,
3753                                                 auth_level,
3754                                                 domain,
3755                                                 username,
3756                                                 password,
3757                                                 presult);
3758 }
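
/*
 * Usage sketch (not compiled in): bind with plain NTLMSSP and integrity
 * protection; the domain, user and password values are placeholders.
 */
#if 0
static NTSTATUS example_open_ntlmssp(struct cli_state *cli,
                                     struct rpc_pipe_client **presult)
{
        return cli_rpc_pipe_open_ntlmssp(cli, &ndr_table_netlogon.syntax_id,
                                         PIPE_AUTH_LEVEL_INTEGRITY,
                                         "EXAMPLEDOM", "exampleuser",
                                         "examplepass", presult);
}
#endif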
3759
3760 /****************************************************************************
3761  External interface.
3762  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3763  ****************************************************************************/
3764
3765 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3766                                           const struct ndr_syntax_id *interface,
3767                                           enum pipe_auth_level auth_level,
3768                                           const char *domain,
3769                                           const char *username,
3770                                           const char *password,
3771                                           struct rpc_pipe_client **presult)
3772 {
3773         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3774                                                 interface,
3775                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3776                                                 auth_level,
3777                                                 domain,
3778                                                 username,
3779                                                 password,
3780                                                 presult);
3781 }
3782
3783 /****************************************************************************
3784  Get the schannel session key out of an already opened netlogon pipe.
3785  ****************************************************************************/
3786 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3787                                                 struct cli_state *cli,
3788                                                 const char *domain,
3789                                                 uint32 *pneg_flags)
3790 {
3791         uint32 sec_chan_type = 0;
3792         unsigned char machine_pwd[16];
3793         const char *machine_account;
3794         NTSTATUS status;
3795
3796         /* Get the machine account credentials from secrets.tdb. */
3797         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3798                                &sec_chan_type))
3799         {
3800                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3801                         "trust account password for domain '%s'\n",
3802                         domain));
3803                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3804         }
3805
3806         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3807                                         cli->desthost, /* server name */
3808                                         domain,        /* domain */
3809                                         global_myname(), /* client name */
3810                                         machine_account, /* machine account name */
3811                                         machine_pwd,
3812                                         sec_chan_type,
3813                                         pneg_flags);
3814
3815         if (!NT_STATUS_IS_OK(status)) {
3816                 DEBUG(3, ("get_schannel_session_key_common: "
3817                           "rpccli_netlogon_setup_creds failed with result %s "
3818                           "to server %s, domain %s, machine account %s.\n",
3819                           nt_errstr(status), cli->desthost, domain,
3820                           machine_account ));
3821                 return status;
3822         }
3823
3824         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3825                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3826                         cli->desthost));
3827                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3828         }
3829
3830         return NT_STATUS_OK;
3831 }
3832
3833 /****************************************************************************
3834  Open a netlogon pipe and get the schannel session key.
3835  Now exposed to external callers.
3836  ****************************************************************************/
3837
3838
3839 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3840                                   const char *domain,
3841                                   uint32 *pneg_flags,
3842                                   struct rpc_pipe_client **presult)
3843 {
3844         struct rpc_pipe_client *netlogon_pipe = NULL;
3845         NTSTATUS status;
3846
3847         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3848                                           &netlogon_pipe);
3849         if (!NT_STATUS_IS_OK(status)) {
3850                 return status;
3851         }
3852
3853         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3854                                                  pneg_flags);
3855         if (!NT_STATUS_IS_OK(status)) {
3856                 TALLOC_FREE(netlogon_pipe);
3857                 return status;
3858         }
3859
3860         *presult = netlogon_pipe;
3861         return NT_STATUS_OK;
3862 }
3863
3864 /****************************************************************************
3865  External interface.
3866  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3867  using session_key. sign and seal.
3868  ****************************************************************************/
3869
3870 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3871                                              const struct ndr_syntax_id *interface,
3872                                              enum pipe_auth_level auth_level,
3873                                              const char *domain,
3874                                              const struct dcinfo *pdc,
3875                                              struct rpc_pipe_client **presult)
3876 {
3877         struct rpc_pipe_client *result;
3878         struct cli_pipe_auth_data *auth;
3879         NTSTATUS status;
3880
3881         status = cli_rpc_pipe_open(cli, interface, &result);
3882         if (!NT_STATUS_IS_OK(status)) {
3883                 return status;
3884         }
3885
3886         status = rpccli_schannel_bind_data(result, domain, auth_level,
3887                                            pdc->sess_key, &auth);
3888         if (!NT_STATUS_IS_OK(status)) {
3889                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3890                           nt_errstr(status)));
3891                 TALLOC_FREE(result);
3892                 return status;
3893         }
3894
3895         status = rpc_pipe_bind(result, auth);
3896         if (!NT_STATUS_IS_OK(status)) {
3897                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3898                           "cli_rpc_pipe_bind failed with error %s\n",
3899                           nt_errstr(status) ));
3900                 TALLOC_FREE(result);
3901                 return status;
3902         }
3903
3904         /*
3905          * The credentials on a new netlogon pipe are the ones we are passed
3906          * in - copy them over.
3907          */
3908         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3909         if (result->dc == NULL) {
3910                 DEBUG(0, ("talloc failed\n"));
3911                 TALLOC_FREE(result);
3912                 return NT_STATUS_NO_MEMORY;
3913         }
3914
3915         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3916                   "for domain %s and bound using schannel.\n",
3917                   get_pipe_name_from_iface(interface),
3918                   cli->desthost, domain ));
3919
3920         *presult = result;
3921         return NT_STATUS_OK;
3922 }
3923
3924 /****************************************************************************
3925  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3926  Fetch the session key ourselves using a temporary netlogon pipe. This
3927  version uses an ntlmssp auth bound netlogon pipe to get the key.
3928  ****************************************************************************/
3929
3930 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3931                                                       const char *domain,
3932                                                       const char *username,
3933                                                       const char *password,
3934                                                       uint32 *pneg_flags,
3935                                                       struct rpc_pipe_client **presult)
3936 {
3937         struct rpc_pipe_client *netlogon_pipe = NULL;
3938         NTSTATUS status;
3939
3940         status = cli_rpc_pipe_open_spnego_ntlmssp(
3941                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3942                 domain, username, password, &netlogon_pipe);
3943         if (!NT_STATUS_IS_OK(status)) {
3944                 return status;
3945         }
3946
3947         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3948                                                  pneg_flags);
3949         if (!NT_STATUS_IS_OK(status)) {
3950                 TALLOC_FREE(netlogon_pipe);
3951                 return status;
3952         }
3953
3954         *presult = netlogon_pipe;
3955         return NT_STATUS_OK;
3956 }
3957
3958 /****************************************************************************
3959  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3960  Fetch the session key ourselves using a temporary netlogon pipe. This version
3961  uses an ntlmssp bind to get the session key.
3962  ****************************************************************************/
3963
3964 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3965                                                  const struct ndr_syntax_id *interface,
3966                                                  enum pipe_auth_level auth_level,
3967                                                  const char *domain,
3968                                                  const char *username,
3969                                                  const char *password,
3970                                                  struct rpc_pipe_client **presult)
3971 {
3972         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3973         struct rpc_pipe_client *netlogon_pipe = NULL;
3974         struct rpc_pipe_client *result = NULL;
3975         NTSTATUS status;
3976
3977         status = get_schannel_session_key_auth_ntlmssp(
3978                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3979         if (!NT_STATUS_IS_OK(status)) {
3980                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3981                         "key from server %s for domain %s.\n",
3982                         cli->desthost, domain ));
3983                 return status;
3984         }
3985
3986         status = cli_rpc_pipe_open_schannel_with_key(
3987                 cli, interface, auth_level, domain, netlogon_pipe->dc,
3988                 &result);
3989
3990         /* Now that we've bound using the session key, we can close the netlogon pipe. */
3991         TALLOC_FREE(netlogon_pipe);
3992
3993         if (NT_STATUS_IS_OK(status)) {
3994                 *presult = result;
3995         }
3996         return status;
3997 }
3998
3999 /****************************************************************************
4000  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4001  Fetch the session key ourselves using a temporary netlogon pipe.
4002  ****************************************************************************/
4003
4004 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4005                                     const struct ndr_syntax_id *interface,
4006                                     enum pipe_auth_level auth_level,
4007                                     const char *domain,
4008                                     struct rpc_pipe_client **presult)
4009 {
4010         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4011         struct rpc_pipe_client *netlogon_pipe = NULL;
4012         struct rpc_pipe_client *result = NULL;
4013         NTSTATUS status;
4014
4015         status = get_schannel_session_key(cli, domain, &neg_flags,
4016                                           &netlogon_pipe);
4017         if (!NT_STATUS_IS_OK(status)) {
4018                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4019                         "key from server %s for domain %s.\n",
4020                         cli->desthost, domain ));
4021                 return status;
4022         }
4023
4024         status = cli_rpc_pipe_open_schannel_with_key(
4025                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4026                 &result);
4027
4028         /* Now that we've bound using the session key we can close the netlogon pipe. */
4029         TALLOC_FREE(netlogon_pipe);
4030
4031         if (NT_STATUS_IS_OK(status)) {
4032                 *presult = result;
4033         }
4034
4035         return status;
4036 }
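/*
 * Illustrative caller sketch (not compiled in): the common case of opening
 * the netlogon pipe itself with schannel sign/seal, letting the routine
 * above fetch the session key for us first.  The cli and domain variables
 * are assumed to be supplied by the caller.
 *
 *	struct rpc_pipe_client *netlogon = NULL;
 *	NTSTATUS status;
 *
 *	status = cli_rpc_pipe_open_schannel(cli,
 *					    &ndr_table_netlogon.syntax_id,
 *					    PIPE_AUTH_LEVEL_PRIVACY,
 *					    domain, &netlogon);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 */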
4037
4038 /****************************************************************************
4039  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4040  The idea is this can be called with service_princ, username and password all
4041  NULL so long as the caller has a TGT.
4042  ****************************************************************************/
4043
4044 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4045                                 const struct ndr_syntax_id *interface,
4046                                 enum pipe_auth_level auth_level,
4047                                 const char *service_princ,
4048                                 const char *username,
4049                                 const char *password,
4050                                 struct rpc_pipe_client **presult)
4051 {
4052 #ifdef HAVE_KRB5
4053         struct rpc_pipe_client *result;
4054         struct cli_pipe_auth_data *auth;
4055         NTSTATUS status;
4056
4057         status = cli_rpc_pipe_open(cli, interface, &result);
4058         if (!NT_STATUS_IS_OK(status)) {
4059                 return status;
4060         }
4061
4062         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4063                                            username, password, &auth);
4064         if (!NT_STATUS_IS_OK(status)) {
4065                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4066                           nt_errstr(status)));
4067                 TALLOC_FREE(result);
4068                 return status;
4069         }
4070
4071         status = rpc_pipe_bind(result, auth);
4072         if (!NT_STATUS_IS_OK(status)) {
4073                 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4074                           "with error %s\n", nt_errstr(status)));
4075                 TALLOC_FREE(result);
4076                 return status;
4077         }
4078
4079         *presult = result;
4080         return NT_STATUS_OK;
4081 #else
4082         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4083         return NT_STATUS_NOT_IMPLEMENTED;
4084 #endif
4085 }
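/*
 * Illustrative caller sketch (not compiled in): as the comment above notes,
 * service_princ, username and password may all be NULL when the caller
 * already holds a TGT in the credential cache.  The cli variable and the
 * choice of the lsarpc interface here are assumptions for the example.
 *
 *	struct rpc_pipe_client *lsa_pipe = NULL;
 *	NTSTATUS status;
 *
 *	status = cli_rpc_pipe_open_krb5(cli, &ndr_table_lsarpc.syntax_id,
 *					PIPE_AUTH_LEVEL_PRIVACY,
 *					NULL, NULL, NULL, &lsa_pipe);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 */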
4086
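/****************************************************************************
 Fetch the session key of an open and bound pipe. The key returned depends
 on the authentication type used for the bind.
 ****************************************************************************/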
4087 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4088                              struct rpc_pipe_client *cli,
4089                              DATA_BLOB *session_key)
4090 {
4091         if (!session_key || !cli) {
4092                 return NT_STATUS_INVALID_PARAMETER;
4093         }
4094
4095         if (!cli->auth) {
4096                 return NT_STATUS_INVALID_PARAMETER;
4097         }
4098
4099         switch (cli->auth->auth_type) {
4100                 case PIPE_AUTH_TYPE_SCHANNEL:
4101                         *session_key = data_blob_talloc(mem_ctx,
4102                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4103                         break;
4104                 case PIPE_AUTH_TYPE_NTLMSSP:
4105                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4106                         *session_key = data_blob_talloc(mem_ctx,
4107                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4108                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4109                         break;
4110                 case PIPE_AUTH_TYPE_KRB5:
4111                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4112                         *session_key = data_blob_talloc(mem_ctx,
4113                                 cli->auth->a_u.kerberos_auth->session_key.data,
4114                                 cli->auth->a_u.kerberos_auth->session_key.length);
4115                         break;
4116                 case PIPE_AUTH_TYPE_NONE:
4117                         *session_key = data_blob_talloc(mem_ctx,
4118                                 cli->auth->user_session_key.data,
4119                                 cli->auth->user_session_key.length);
4120                         break;
4121                 default:
4122                         return NT_STATUS_NO_USER_SESSION_KEY;
4123         }
4124
4125         return NT_STATUS_OK;
4126 }
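/*
 * Illustrative caller sketch (not compiled in): fetch the session key of an
 * already-bound pipe, for example to encrypt secrets sent over SAMR or LSA.
 * The rpc_cli variable and the use of talloc_tos() as the memory context are
 * assumptions for the example.
 *
 *	DATA_BLOB session_key;
 *	NTSTATUS status;
 *
 *	status = cli_get_session_key(talloc_tos(), rpc_cli, &session_key);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 *	... use session_key.data / session_key.length ...
 *	data_blob_free(&session_key);
 */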