/* Convert rpc_cli_transport->write to tevent_req
 * [ira/wip.git] / source3 / rpc_client / cli_pipe.c
 */
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
static const struct pipe_id_info {
        /* the names appear not to matter: the syntaxes _do_ matter */

        const char *client_pipe;       /* on-the-wire pipe name, e.g. "\PIPE\samr" */
        const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
} pipe_names [] =
{
        { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
        { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
        { PIPE_SAMR,            &ndr_table_samr.syntax_id },
        { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
        { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
        { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
        { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
        { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
        { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
        { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
        { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
        { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
        { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
        { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
        { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
        { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
        { NULL, NULL }          /* terminator - lookup loops rely on this */
};
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
85 {
86         char *guid_str;
87         const char *result;
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
102         if (guid_str == NULL) {
103                 return NULL;
104         }
105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106                                  (int)interface->if_version);
107         TALLOC_FREE(guid_str);
108
109         if (result == NULL) {
110                 return "PIPE";
111         }
112         return result;
113 }
114
115 /********************************************************************
116  Map internal value to wire value.
117  ********************************************************************/
118
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
120 {
121         switch (auth_type) {
122
123         case PIPE_AUTH_TYPE_NONE:
124                 return RPC_ANONYMOUS_AUTH_TYPE;
125
126         case PIPE_AUTH_TYPE_NTLMSSP:
127                 return RPC_NTLMSSP_AUTH_TYPE;
128
129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131                 return RPC_SPNEGO_AUTH_TYPE;
132
133         case PIPE_AUTH_TYPE_SCHANNEL:
134                 return RPC_SCHANNEL_AUTH_TYPE;
135
136         case PIPE_AUTH_TYPE_KRB5:
137                 return RPC_KRB5_AUTH_TYPE;
138
139         default:
140                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
141                         "auth type %u\n",
142                         (unsigned int)auth_type ));
143                 break;
144         }
145         return -1;
146 }
147
148 /********************************************************************
149  Pipe description for a DEBUG
150  ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152                                    struct rpc_pipe_client *cli)
153 {
154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155         if (result == NULL) {
156                 return "pipe";
157         }
158         return result;
159 }
160
161 /********************************************************************
162  Rpc pipe call id.
163  ********************************************************************/
164
165 static uint32 get_rpc_call_id(void)
166 {
167         static uint32 call_id = 0;
168         return ++call_id;
169 }
170
171 /*
172  * Realloc pdu to have a least "size" bytes
173  */
174
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
176 {
177         size_t extra_size;
178
179         if (prs_data_size(pdu) >= size) {
180                 return true;
181         }
182
183         extra_size = size - prs_data_size(pdu);
184
185         if (!prs_force_grow(pdu, extra_size)) {
186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187                           "%d bytes.\n", (int)extra_size));
188                 return false;
189         }
190
191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192                   (int)extra_size, prs_data_size(pdu)));
193         return true;
194 }
195
196
197 /*******************************************************************
198  Use SMBreadX to get rest of one fragment's worth of rpc data.
199  Reads the whole size or give an error message
200  ********************************************************************/
201
/* State for rpc_read_send/rpc_read_recv: read exactly "size" bytes from
 * the transport into "data", re-issuing reads until done. */
struct rpc_read_state {
        struct event_context *ev;               /* event loop for follow-up reads */
        struct rpc_cli_transport *transport;    /* provides read_send/read_recv */
        uint8_t *data;                          /* destination buffer (caller-owned) */
        size_t size;                            /* total bytes requested */
        size_t num_read;                        /* bytes received so far */
};

static void rpc_read_done(struct tevent_req *subreq);
211
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213                                         struct event_context *ev,
214                                         struct rpc_cli_transport *transport,
215                                         uint8_t *data, size_t size)
216 {
217         struct tevent_req *req, *subreq;
218         struct rpc_read_state *state;
219
220         req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
221         if (req == NULL) {
222                 return NULL;
223         }
224         state->ev = ev;
225         state->transport = transport;
226         state->data = data;
227         state->size = size;
228         state->num_read = 0;
229
230         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
231
232         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
233                                       transport->priv);
234         if (subreq == NULL) {
235                 goto fail;
236         }
237         tevent_req_set_callback(subreq, rpc_read_done, req);
238         return req;
239
240  fail:
241         TALLOC_FREE(req);
242         return NULL;
243 }
244
245 static void rpc_read_done(struct tevent_req *subreq)
246 {
247         struct tevent_req *req = tevent_req_callback_data(
248                 subreq, struct tevent_req);
249         struct rpc_read_state *state = tevent_req_data(
250                 req, struct rpc_read_state);
251         NTSTATUS status;
252         ssize_t received;
253
254         status = state->transport->read_recv(subreq, &received);
255         TALLOC_FREE(subreq);
256         if (!NT_STATUS_IS_OK(status)) {
257                 tevent_req_nterror(req, status);
258                 return;
259         }
260
261         state->num_read += received;
262         if (state->num_read == state->size) {
263                 tevent_req_done(req);
264                 return;
265         }
266
267         subreq = state->transport->read_send(state, state->ev,
268                                              state->data + state->num_read,
269                                              state->size - state->num_read,
270                                              state->transport->priv);
271         if (tevent_req_nomem(subreq, req)) {
272                 return;
273         }
274         tevent_req_set_callback(subreq, rpc_read_done, req);
275 }
276
/* Collect the result of rpc_read_send(): NT_STATUS_OK iff all requested
 * bytes arrived. */
static NTSTATUS rpc_read_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}
281
/* State for rpc_write_send/rpc_write_recv: write exactly "size" bytes from
 * "data" to the transport, re-issuing writes until done. */
struct rpc_write_state {
        struct event_context *ev;               /* event loop for follow-up writes */
        struct rpc_cli_transport *transport;    /* provides write_send/write_recv */
        const uint8_t *data;                    /* source buffer (caller-owned) */
        size_t size;                            /* total bytes to write */
        size_t num_written;                     /* bytes sent so far */
};

static void rpc_write_done(struct tevent_req *subreq);
291
292 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
293                                          struct event_context *ev,
294                                          struct rpc_cli_transport *transport,
295                                          const uint8_t *data, size_t size)
296 {
297         struct tevent_req *req, *subreq;
298         struct rpc_write_state *state;
299
300         req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
301         if (req == NULL) {
302                 return NULL;
303         }
304         state->ev = ev;
305         state->transport = transport;
306         state->data = data;
307         state->size = size;
308         state->num_written = 0;
309
310         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
311
312         subreq = transport->write_send(state, ev, data, size, transport->priv);
313         if (subreq == NULL) {
314                 goto fail;
315         }
316         tevent_req_set_callback(subreq, rpc_write_done, req);
317         return req;
318  fail:
319         TALLOC_FREE(req);
320         return NULL;
321 }
322
323 static void rpc_write_done(struct tevent_req *subreq)
324 {
325         struct tevent_req *req = tevent_req_callback_data(
326                 subreq, struct tevent_req);
327         struct rpc_write_state *state = tevent_req_data(
328                 req, struct rpc_write_state);
329         NTSTATUS status;
330         ssize_t written;
331
332         status = state->transport->write_recv(subreq, &written);
333         TALLOC_FREE(subreq);
334         if (!NT_STATUS_IS_OK(status)) {
335                 tevent_req_nterror(req, status);
336                 return;
337         }
338
339         state->num_written += written;
340
341         if (state->num_written == state->size) {
342                 tevent_req_done(req);
343                 return;
344         }
345
346         subreq = state->transport->write_send(state, state->ev,
347                                               state->data + state->num_written,
348                                               state->size - state->num_written,
349                                               state->transport->priv);
350         if (tevent_req_nomem(subreq, req)) {
351                 return;
352         }
353         tevent_req_set_callback(subreq, rpc_write_done, req);
354 }
355
/* Collect the result of rpc_write_send(): NT_STATUS_OK iff all bytes were
 * written. */
static NTSTATUS rpc_write_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}
360
361
362 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
363                                  struct rpc_hdr_info *prhdr,
364                                  prs_struct *pdu)
365 {
366         /*
367          * This next call sets the endian bit correctly in current_pdu. We
368          * will propagate this to rbuf later.
369          */
370
371         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
372                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
373                 return NT_STATUS_BUFFER_TOO_SMALL;
374         }
375
376         if (prhdr->frag_len > cli->max_recv_frag) {
377                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
378                           " we only allow %d\n", (int)prhdr->frag_len,
379                           (int)cli->max_recv_frag));
380                 return NT_STATUS_BUFFER_TOO_SMALL;
381         }
382
383         return NT_STATUS_OK;
384 }
385
386 /****************************************************************************
387  Try and get a PDU's worth of data from current_pdu. If not, then read more
388  from the wire.
389  ****************************************************************************/
390
/* State for get_complete_frag_send/recv: make sure "pdu" contains one
 * complete RPC fragment, reading missing bytes from the wire. */
struct get_complete_frag_state {
        struct event_context *ev;       /* event loop for the reads */
        struct rpc_pipe_client *cli;    /* pipe whose transport we read from */
        struct rpc_hdr_info *prhdr;     /* parsed fragment header (output) */
        prs_struct *pdu;                /* buffer being completed */
};

static void get_complete_frag_got_header(struct tevent_req *subreq);
static void get_complete_frag_got_rest(struct tevent_req *subreq);
400
/*
 * Ensure "pdu" holds one complete RPC fragment. Three cases:
 *  1. less than RPC_HEADER_LEN bytes present: read the rest of the header
 *     first (the header carries the total fragment length);
 *  2. header present but fragment incomplete: read the missing bytes;
 *  3. fragment already complete: finish immediately (posted request).
 */
static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
                                                 struct event_context *ev,
                                                 struct rpc_pipe_client *cli,
                                                 struct rpc_hdr_info *prhdr,
                                                 prs_struct *pdu)
{
        struct tevent_req *req, *subreq;
        struct get_complete_frag_state *state;
        uint32_t pdu_len;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state,
                                struct get_complete_frag_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->prhdr = prhdr;
        state->pdu = pdu;

        pdu_len = prs_data_size(pdu);
        if (pdu_len < RPC_HEADER_LEN) {
                /* Case 1: complete the header before anything else. */
                if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(
                        state, state->ev,
                        state->cli->transport,
                        (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
                        RPC_HEADER_LEN - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, get_complete_frag_got_header,
                                        req);
                return req;
        }

        status = parse_rpc_header(cli, prhdr, pdu);
        if (!NT_STATUS_IS_OK(status)) {
                goto post_status;
        }

        /*
         * Ensure we have frag_len bytes of data.
         */
        if (pdu_len < prhdr->frag_len) {
                /* Case 2: header parsed, body still incomplete. */
                if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(state, state->ev,
                                       state->cli->transport,
                                       (uint8_t *)(prs_data_p(pdu) + pdu_len),
                                       prhdr->frag_len - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, get_complete_frag_got_rest,
                                        req);
                return req;
        }

        /* Case 3: nothing to read. */
        status = NT_STATUS_OK;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
}
477
/* The missing header bytes arrived: parse the header, grow the buffer to
 * the full fragment length and read the fragment body. */
static void get_complete_frag_got_header(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct get_complete_frag_state *state = tevent_req_data(
                req, struct get_complete_frag_state);
        NTSTATUS status;

        status = rpc_read_recv(subreq);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }

        status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }

        if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
                tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
                return;
        }

        /*
         * We're here in this piece of code because we've read exactly
         * RPC_HEADER_LEN bytes into state->pdu.
         */

        subreq = rpc_read_send(
                state, state->ev, state->cli->transport,
                (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
                state->prhdr->frag_len - RPC_HEADER_LEN);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
}
518
519 static void get_complete_frag_got_rest(struct tevent_req *subreq)
520 {
521         struct tevent_req *req = tevent_req_callback_data(
522                 subreq, struct tevent_req);
523         NTSTATUS status;
524
525         status = rpc_read_recv(subreq);
526         TALLOC_FREE(subreq);
527         if (!NT_STATUS_IS_OK(status)) {
528                 tevent_req_nterror(req, status);
529                 return;
530         }
531         tevent_req_done(req);
532 }
533
/* Collect the result of get_complete_frag_send(): NT_STATUS_OK iff the pdu
 * now contains a complete fragment. */
static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}
538
539 /****************************************************************************
540  NTLMSSP specific sign/seal.
541  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
542  In fact I should probably abstract these into identical pieces of code... JRA.
543  ****************************************************************************/
544
/*
 * Verify (and for PRIVACY, decrypt in place) the NTLMSSP auth trailer on a
 * received PDU in current_pdu. On success the prs offset is restored to its
 * entry value and *p_ss_padding_len receives the sign/seal padding length
 * the caller must strip from the data stream.
 */
static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
                                prs_struct *current_pdu,
                                uint8 *p_ss_padding_len)
{
        RPC_HDR_AUTH auth_info;
        uint32 save_offset = prs_offset(current_pdu); /* restored before returning */
        uint32 auth_len = prhdr->auth_len;
        NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
        unsigned char *data = NULL;
        size_t data_len;
        unsigned char *full_packet_data = NULL;
        size_t full_packet_data_len;
        DATA_BLOB auth_blob;
        NTSTATUS status;

        /* Auth levels below INTEGRITY carry no signature to verify. */
        if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
            || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
                return NT_STATUS_OK;
        }

        if (!ntlmssp_state) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Ensure there's enough data for an authenticated response. */
        if ((auth_len > RPC_MAX_SIGN_SIZE) ||
                        (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
                DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
                        (unsigned int)auth_len ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /*
         * We need the full packet data + length (minus auth stuff) as well as the packet data + length
         * after the RPC header.
         * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
         * functions as NTLMv2 checks the rpc headers also.
         */

        data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
        data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);

        full_packet_data = (unsigned char *)prs_data_p(current_pdu);
        full_packet_data_len = prhdr->frag_len - auth_len;

        /* Pull the auth header and the following data into a blob. */
        if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
                DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
                        (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
                DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /* The signature blob starts right after the auth header. */
        auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
        auth_blob.length = auth_len;

        switch (cli->auth->auth_level) {
                case PIPE_AUTH_LEVEL_PRIVACY:
                        /* Data is encrypted. */
                        status = ntlmssp_unseal_packet(ntlmssp_state,
                                                        data, data_len,
                                                        full_packet_data,
                                                        full_packet_data_len,
                                                        &auth_blob);
                        if (!NT_STATUS_IS_OK(status)) {
                                DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
                                        "packet from %s. Error was %s.\n",
                                        rpccli_pipe_txt(debug_ctx(), cli),
                                        nt_errstr(status) ));
                                return status;
                        }
                        break;
                case PIPE_AUTH_LEVEL_INTEGRITY:
                        /* Data is signed. */
                        status = ntlmssp_check_packet(ntlmssp_state,
                                                        data, data_len,
                                                        full_packet_data,
                                                        full_packet_data_len,
                                                        &auth_blob);
                        if (!NT_STATUS_IS_OK(status)) {
                                DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
                                        "packet from %s. Error was %s.\n",
                                        rpccli_pipe_txt(debug_ctx(), cli),
                                        nt_errstr(status) ));
                                return status;
                        }
                        break;
                default:
                        DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
                                  "auth level %d\n", cli->auth->auth_level));
                        return NT_STATUS_INVALID_INFO_CLASS;
        }

        /*
         * Return the current pointer to the data offset.
         */

        if(!prs_set_offset(current_pdu, save_offset)) {
                /* NOTE(review): message names "api_pipe_auth_process" - looks
                 * like a stale copy/paste from the server side; confirm before
                 * changing the log text. */
                DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
                        (unsigned int)save_offset ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /*
         * Remember the padding length. We must remove it from the real data
         * stream once the sign/seal is done.
         */

        *p_ss_padding_len = auth_info.auth_pad_len;

        return NT_STATUS_OK;
}
661
662 /****************************************************************************
663  schannel specific sign/seal.
664  ****************************************************************************/
665
/*
 * Verify (and for PRIVACY, decrypt in place) the schannel auth trailer on a
 * received PDU. On success the prs offset is restored to its entry value,
 * the schannel sequence number is advanced, and *p_ss_padding_len receives
 * the sign/seal padding length the caller must strip.
 */
static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
                                prs_struct *current_pdu,
                                uint8 *p_ss_padding_len)
{
        RPC_HDR_AUTH auth_info;
        RPC_AUTH_SCHANNEL_CHK schannel_chk;
        uint32 auth_len = prhdr->auth_len;
        uint32 save_offset = prs_offset(current_pdu); /* restored before returning */
        struct schannel_auth_struct *schannel_auth =
                cli->auth->a_u.schannel_auth;
        uint32 data_len;

        /* Auth levels below INTEGRITY carry no schannel trailer. */
        if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
            || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
                return NT_STATUS_OK;
        }

        /* Schannel check blobs have a single fixed size. */
        if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
                DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
                return NT_STATUS_INVALID_PARAMETER;
        }

        if (!schannel_auth) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Ensure there's enough data for an authenticated response. */
        if ((auth_len > RPC_MAX_SIGN_SIZE) ||
                        (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
                DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
                        (unsigned int)auth_len ));
                return NT_STATUS_INVALID_PARAMETER;
        }

        data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;

        /* Seek to the auth header that follows the payload. */
        if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
                DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
                        (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
                DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
                DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
                        auth_info.auth_type));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
                                &schannel_chk, current_pdu, 0)) {
                DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /* Verify/decrypt the payload in place. */
        if (!schannel_decode(schannel_auth,
                        cli->auth->auth_level,
                        SENDER_IS_ACCEPTOR,
                        &schannel_chk,
                        prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
                        data_len)) {
                DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
                                "Connection to %s.\n",
                                rpccli_pipe_txt(debug_ctx(), cli)));
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The sequence number gets incremented on both send and receive. */
        schannel_auth->seq_num++;

        /*
         * Return the current pointer to the data offset.
         */

        if(!prs_set_offset(current_pdu, save_offset)) {
                /* NOTE(review): message names "api_pipe_auth_process" - looks
                 * like a stale copy/paste from the server side; confirm before
                 * changing the log text. */
                DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
                        (unsigned int)save_offset ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /*
         * Remember the padding length. We must remove it from the real data
         * stream once the sign/seal is done.
         */

        *p_ss_padding_len = auth_info.auth_pad_len;

        return NT_STATUS_OK;
}
759
760 /****************************************************************************
761  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
762  ****************************************************************************/
763
764 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
765                                 prs_struct *current_pdu,
766                                 uint8 *p_ss_padding_len)
767 {
768         NTSTATUS ret = NT_STATUS_OK;
769
770         /* Paranioa checks for auth_len. */
771         if (prhdr->auth_len) {
772                 if (prhdr->auth_len > prhdr->frag_len) {
773                         return NT_STATUS_INVALID_PARAMETER;
774                 }
775
776                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
777                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
778                         /* Integer wrap attempt. */
779                         return NT_STATUS_INVALID_PARAMETER;
780                 }
781         }
782
783         /*
784          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
785          */
786
787         switch(cli->auth->auth_type) {
788                 case PIPE_AUTH_TYPE_NONE:
789                         if (prhdr->auth_len) {
790                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
791                                           "Connection to %s - got non-zero "
792                                           "auth len %u.\n",
793                                         rpccli_pipe_txt(debug_ctx(), cli),
794                                         (unsigned int)prhdr->auth_len ));
795                                 return NT_STATUS_INVALID_PARAMETER;
796                         }
797                         break;
798
799                 case PIPE_AUTH_TYPE_NTLMSSP:
800                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
801                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
802                         if (!NT_STATUS_IS_OK(ret)) {
803                                 return ret;
804                         }
805                         break;
806
807                 case PIPE_AUTH_TYPE_SCHANNEL:
808                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
809                         if (!NT_STATUS_IS_OK(ret)) {
810                                 return ret;
811                         }
812                         break;
813
814                 case PIPE_AUTH_TYPE_KRB5:
815                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
816                 default:
817                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
818                                   "to %s - unknown internal auth type %u.\n",
819                                   rpccli_pipe_txt(debug_ctx(), cli),
820                                   cli->auth->auth_type ));
821                         return NT_STATUS_INVALID_INFO_CLASS;
822         }
823
824         return NT_STATUS_OK;
825 }
826
827 /****************************************************************************
828  Do basic authentication checks on an incoming pdu.
829  ****************************************************************************/
830
static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
			prs_struct *current_pdu,
			uint8 expected_pkt_type,
			char **ppdata,
			uint32 *pdata_len,
			prs_struct *return_data)
{

	NTSTATUS ret = NT_STATUS_OK;
	uint32 current_pdu_len = prs_data_size(current_pdu);

	/* The caller must hand us exactly one complete fragment. */
	if (current_pdu_len != prhdr->frag_len) {
		DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
			(unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/*
	 * Point the return values at the real data including the RPC
	 * header. Just in case the caller wants it.
	 */
	*ppdata = prs_data_p(current_pdu);
	*pdata_len = current_pdu_len;

	/* Ensure we have the correct type. */
	switch (prhdr->pkt_type) {
		case RPC_ALTCONTRESP:
		case RPC_BINDACK:

			/* Alter context and bind ack share the same packet definitions. */
			break;


		case RPC_RESPONSE:
		{
			RPC_HDR_RESP rhdr_resp;
			uint8 ss_padding_len = 0;

			/* Unmarshall the response header that follows the RPC header. */
			if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			/* Here's where we deal with incoming sign/seal. */
			ret = cli_pipe_validate_rpc_response(cli, prhdr,
					current_pdu, &ss_padding_len);
			if (!NT_STATUS_IS_OK(ret)) {
				return ret;
			}

			/* Point the return values at the NDR data. Remember to remove any ss padding. */
			*ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;

			/* Make sure the headers plus padding actually fit in the fragment. */
			if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			*pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;

			/* Remember to remove the auth footer. */
			if (prhdr->auth_len) {
				/* We've already done integer wrap tests on auth_len in
					cli_pipe_validate_rpc_response(). */
				if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
					return NT_STATUS_BUFFER_TOO_SMALL;
				}
				*pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
			}

			DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
				current_pdu_len, *pdata_len, ss_padding_len ));

			/*
			 * If this is the first reply, and the allocation hint is
			 * reasonable (below an arbitrary 15MB sanity cap), try and
			 * set up the return_data parse_struct to the correct size.
			 */

			if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
				if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
					DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
						"too large to allocate\n",
						(unsigned int)rhdr_resp.alloc_hint ));
					return NT_STATUS_NO_MEMORY;
				}
			}

			break;
		}

		case RPC_BINDNACK:
			DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
				  "received from %s!\n",
				  rpccli_pipe_txt(debug_ctx(), cli)));
			/* Use this for now... */
			return NT_STATUS_NETWORK_ACCESS_DENIED;

		case RPC_FAULT:
		{
			RPC_HDR_RESP rhdr_resp;
			RPC_HDR_FAULT fault_resp;

			/* A fault PDU carries a response header followed by the fault code. */
			if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
				  "code %s received from %s!\n",
				dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
				rpccli_pipe_txt(debug_ctx(), cli)));
			/* Never report success for a fault PDU. */
			if (NT_STATUS_IS_OK(fault_resp.status)) {
				return NT_STATUS_UNSUCCESSFUL;
			} else {
				return fault_resp.status;
			}
		}

		default:
			DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
				"from %s!\n",
				(unsigned int)prhdr->pkt_type,
				rpccli_pipe_txt(debug_ctx(), cli)));
			return NT_STATUS_INVALID_INFO_CLASS;
	}

	/* Reject a valid-but-unexpected packet type (e.g. a BINDACK when
	   a RESPONSE was wanted). */
	if (prhdr->pkt_type != expected_pkt_type) {
		DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
			  "got an unexpected RPC packet type - %u, not %u\n",
			rpccli_pipe_txt(debug_ctx(), cli),
			prhdr->pkt_type,
			expected_pkt_type));
		return NT_STATUS_INVALID_INFO_CLASS;
	}

	/* Do this just before return - we don't want to modify any rpc header
	   data before now as we may have needed to do cryptographic actions on
	   it before. */

	if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
		DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
			"setting fragment first/last ON.\n"));
		prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
	}

	return NT_STATUS_OK;
}
982
983 /****************************************************************************
984  Ensure we eat the just processed pdu from the current_pdu prs_struct.
985  Normally the frag_len and buffer size will match, but on the first trans
986  reply there is a theoretical chance that buffer size > frag_len, so we must
987  deal with that.
988  ****************************************************************************/
989
990 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
991 {
992         uint32 current_pdu_len = prs_data_size(current_pdu);
993
994         if (current_pdu_len < prhdr->frag_len) {
995                 return NT_STATUS_BUFFER_TOO_SMALL;
996         }
997
998         /* Common case. */
999         if (current_pdu_len == (uint32)prhdr->frag_len) {
1000                 prs_mem_free(current_pdu);
1001                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1002                 /* Make current_pdu dynamic with no memory. */
1003                 prs_give_memory(current_pdu, 0, 0, True);
1004                 return NT_STATUS_OK;
1005         }
1006
1007         /*
1008          * Oh no ! More data in buffer than we processed in current pdu.
1009          * Cheat. Move the data down and shrink the buffer.
1010          */
1011
1012         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1013                         current_pdu_len - prhdr->frag_len);
1014
1015         /* Remember to set the read offset back to zero. */
1016         prs_set_offset(current_pdu, 0);
1017
1018         /* Shrink the buffer. */
1019         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1020                 return NT_STATUS_BUFFER_TOO_SMALL;
1021         }
1022
1023         return NT_STATUS_OK;
1024 }
1025
1026 /****************************************************************************
1027  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1028 ****************************************************************************/
1029
/* State for the cli_api_pipe request: one trans (or write+read) exchange. */
struct cli_api_pipe_state {
	struct event_context *ev;		/* Event context driving the subrequests. */
	struct rpc_cli_transport *transport;	/* Transport providing trans_send or write/read. */
	uint8_t *rdata;				/* Reply bytes, talloc'ed on this state. */
	uint32_t rdata_len;			/* Number of valid bytes in rdata. */
};
1036
1037 static void cli_api_pipe_trans_done(struct async_req *subreq);
1038 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1039 static void cli_api_pipe_read_done(struct tevent_req *subreq);
1040
1041 static struct tevent_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1042                                             struct event_context *ev,
1043                                             struct rpc_cli_transport *transport,
1044                                             uint8_t *data, size_t data_len,
1045                                             uint32_t max_rdata_len)
1046 {
1047         struct tevent_req *req;
1048         struct async_req *subreq;
1049         struct tevent_req *subreq2;
1050         struct cli_api_pipe_state *state;
1051         NTSTATUS status;
1052
1053         req = tevent_req_create(mem_ctx, &state, struct cli_api_pipe_state);
1054         if (req == NULL) {
1055                 return NULL;
1056         }
1057         state->ev = ev;
1058         state->transport = transport;
1059
1060         if (max_rdata_len < RPC_HEADER_LEN) {
1061                 /*
1062                  * For a RPC reply we always need at least RPC_HEADER_LEN
1063                  * bytes. We check this here because we will receive
1064                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1065                  */
1066                 status = NT_STATUS_INVALID_PARAMETER;
1067                 goto post_status;
1068         }
1069
1070         if (transport->trans_send != NULL) {
1071                 subreq = transport->trans_send(state, ev, data, data_len,
1072                                                max_rdata_len, transport->priv);
1073                 if (subreq == NULL) {
1074                         status = NT_STATUS_NO_MEMORY;
1075                         goto post_status;
1076                 }
1077                 subreq->async.fn = cli_api_pipe_trans_done;
1078                 subreq->async.priv = req;
1079                 return req;
1080         }
1081
1082         /*
1083          * If the transport does not provide a "trans" routine, i.e. for
1084          * example the ncacn_ip_tcp transport, do the write/read step here.
1085          */
1086
1087         subreq2 = rpc_write_send(state, ev, transport, data, data_len);
1088         if (subreq2 == NULL) {
1089                 goto fail;
1090         }
1091         tevent_req_set_callback(subreq2, cli_api_pipe_write_done, req);
1092         return req;
1093
1094         status = NT_STATUS_INVALID_PARAMETER;
1095
1096  post_status:
1097         if (NT_STATUS_IS_OK(status)) {
1098                 tevent_req_done(req);
1099         } else {
1100                 tevent_req_nterror(req, status);
1101         }
1102         return tevent_req_post(req, ev);
1103  fail:
1104         TALLOC_FREE(req);
1105         return NULL;
1106 }
1107
1108 static void cli_api_pipe_trans_done(struct async_req *subreq)
1109 {
1110         struct tevent_req *req = talloc_get_type_abort(
1111                 subreq->async.priv, struct tevent_req);
1112         struct cli_api_pipe_state *state = tevent_req_data(
1113                 req, struct cli_api_pipe_state);
1114         NTSTATUS status;
1115
1116         status = state->transport->trans_recv(subreq, state, &state->rdata,
1117                                               &state->rdata_len);
1118         TALLOC_FREE(subreq);
1119         if (!NT_STATUS_IS_OK(status)) {
1120                 tevent_req_nterror(req, status);
1121                 return;
1122         }
1123         tevent_req_done(req);
1124 }
1125
1126 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1127 {
1128         struct tevent_req *req = tevent_req_callback_data(
1129                 subreq, struct tevent_req);
1130         struct cli_api_pipe_state *state = tevent_req_data(
1131                 req, struct cli_api_pipe_state);
1132         NTSTATUS status;
1133
1134         status = rpc_write_recv(subreq);
1135         TALLOC_FREE(subreq);
1136         if (!NT_STATUS_IS_OK(status)) {
1137                 tevent_req_nterror(req, status);
1138                 return;
1139         }
1140
1141         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1142         if (tevent_req_nomem(state->rdata, req)) {
1143                 return;
1144         }
1145
1146         /*
1147          * We don't need to use rpc_read_send here, the upper layer will cope
1148          * with a short read, transport->trans_send could also return less
1149          * than state->max_rdata_len.
1150          */
1151         subreq = state->transport->read_send(state, state->ev, state->rdata,
1152                                              RPC_HEADER_LEN,
1153                                              state->transport->priv);
1154         if (tevent_req_nomem(subreq, req)) {
1155                 return;
1156         }
1157         tevent_req_set_callback(subreq, cli_api_pipe_read_done, req);
1158 }
1159
1160 static void cli_api_pipe_read_done(struct tevent_req *subreq)
1161 {
1162         struct tevent_req *req = tevent_req_callback_data(
1163                 subreq, struct tevent_req);
1164         struct cli_api_pipe_state *state = tevent_req_data(
1165                 req, struct cli_api_pipe_state);
1166         NTSTATUS status;
1167         ssize_t received;
1168
1169         status = state->transport->read_recv(subreq, &received);
1170         TALLOC_FREE(subreq);
1171         if (!NT_STATUS_IS_OK(status)) {
1172                 tevent_req_nterror(req, status);
1173                 return;
1174         }
1175         state->rdata_len = received;
1176         tevent_req_done(req);
1177 }
1178
1179 static NTSTATUS cli_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1180                                   uint8_t **prdata, uint32_t *prdata_len)
1181 {
1182         struct cli_api_pipe_state *state = tevent_req_data(
1183                 req, struct cli_api_pipe_state);
1184         NTSTATUS status;
1185
1186         if (tevent_req_is_nterror(req, &status)) {
1187                 return status;
1188         }
1189
1190         *prdata = talloc_move(mem_ctx, &state->rdata);
1191         *prdata_len = state->rdata_len;
1192         return NT_STATUS_OK;
1193 }
1194
1195 /****************************************************************************
1196  Send data on an rpc pipe via trans. The prs_struct data must be the last
1197  pdu fragment of an NDR data stream.
1198
1199  Receive response data from an rpc pipe, which may be large...
1200
1201  Read the first fragment: unfortunately have to use SMBtrans for the first
1202  bit, then SMBreadX for subsequent bits.
1203
1204  If first fragment received also wasn't the last fragment, continue
1205  getting fragments until we _do_ receive the last fragment.
1206
1207  Request/Response PDU's look like the following...
1208
1209  |<------------------PDU len----------------------------------------------->|
1210  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1211
1212  +------------+-----------------+-------------+---------------+-------------+
1213  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1214  +------------+-----------------+-------------+---------------+-------------+
1215
1216  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1217  signing & sealing being negotiated.
1218
1219  ****************************************************************************/
1220
/* State for rpc_api_pipe: reassembles a (possibly multi-fragment) reply. */
struct rpc_api_pipe_state {
	struct event_context *ev;	/* Event context driving subrequests. */
	struct rpc_pipe_client *cli;	/* Pipe the exchange runs over. */
	uint8_t expected_pkt_type;	/* Packet type the reply must carry. */

	prs_struct incoming_frag;	/* Buffer for the fragment being read. */
	struct rpc_hdr_info rhdr;	/* Parsed header of the current fragment. */

	prs_struct incoming_pdu;	/* Incoming reply */
	uint32_t incoming_pdu_offset;	/* Bytes of NDR data copied so far. */
};
1232
1233 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1234 {
1235         prs_mem_free(&state->incoming_frag);
1236         prs_mem_free(&state->incoming_pdu);
1237         return 0;
1238 }
1239
1240 static void rpc_api_pipe_trans_done(struct tevent_req *subreq);
1241 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
1242
/*
 * Send one outgoing PDU and asynchronously reassemble the complete reply
 * from however many fragments the server sends. The reply is collected via
 * rpc_api_pipe_recv().
 */
static struct tevent_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    struct rpc_pipe_client *cli,
					    prs_struct *data, /* Outgoing PDU */
					    uint8_t expected_pkt_type)
{
	struct tevent_req *req, *subreq;
	struct rpc_api_pipe_state *state;
	uint16_t max_recv_frag;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct rpc_api_pipe_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->expected_pkt_type = expected_pkt_type;
	state->incoming_pdu_offset = 0;

	/* Holds one fragment at a time while it is being received. */
	prs_init_empty(&state->incoming_frag, state, UNMARSHALL);

	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
	/* Make incoming_pdu dynamic with no memory. */
	prs_give_memory(&state->incoming_pdu, NULL, 0, true);

	/* Free both prs buffers when state goes away. */
	talloc_set_destructor(state, rpc_api_pipe_state_destructor);

	/*
	 * Ensure we're not sending too much.
	 */
	if (prs_offset(data) > cli->max_xmit_frag) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));

	max_recv_frag = cli->max_recv_frag;

#ifdef DEVELOPER
	/* Force small, randomized receive sizes to exercise the fragment
	   reassembly code in developer builds. */
	max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
#endif

	subreq = cli_api_pipe_send(state, ev, cli->transport,
				   (uint8_t *)prs_data_p(data),
				   prs_offset(data), max_recv_frag);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_api_pipe_trans_done, req);
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
1303
/*
 * The initial trans (or write/read) call finished. Seed incoming_frag with
 * whatever arrived and start pulling in the rest of the first fragment.
 */
static void rpc_api_pipe_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_state *state = tevent_req_data(
		req, struct rpc_api_pipe_state);
	NTSTATUS status;
	uint8_t *rdata = NULL;
	uint32_t rdata_len = 0;
	char *rdata_copy;

	status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
		tevent_req_nterror(req, status);
		return;
	}

	/* No data at all still counts as a completed (empty) exchange. */
	if (rdata == NULL) {
		DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
			 rpccli_pipe_txt(debug_ctx(), state->cli)));
		tevent_req_done(req);
		return;
	}

	/*
	 * Give the memory received from cli_trans as dynamic to the current
	 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
	 * :-(
	 */
	rdata_copy = (char *)memdup(rdata, rdata_len);
	TALLOC_FREE(rdata);
	if (tevent_req_nomem(rdata_copy, req)) {
		return;
	}
	prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);

	/* Ensure we have enough data for a pdu. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
}
1350
/*
 * One complete fragment is in incoming_frag. Validate it, append its NDR
 * data to incoming_pdu, and either finish (last fragment) or loop to fetch
 * the next fragment.
 */
static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_state *state = tevent_req_data(
		req, struct rpc_api_pipe_state);
	NTSTATUS status;
	char *rdata = NULL;
	uint32_t rdata_len = 0;

	status = get_complete_frag_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("get_complete_frag failed: %s\n",
			  nt_errstr(status)));
		tevent_req_nterror(req, status);
		return;
	}

	/* Checks packet type, auth trailer and points rdata/rdata_len at
	   the fragment's NDR payload. */
	status = cli_pipe_validate_current_pdu(
		state->cli, &state->rhdr, &state->incoming_frag,
		state->expected_pkt_type, &rdata, &rdata_len,
		&state->incoming_pdu);

	DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
		  (unsigned)prs_data_size(&state->incoming_frag),
		  (unsigned)state->incoming_pdu_offset,
		  nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if ((state->rhdr.flags & RPC_FLG_FIRST)
	    && (state->rhdr.pack_type[0] == 0)) {
		/*
		 * Set the data type correctly for big-endian data on the
		 * first packet.
		 */
		DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
			  "big-endian.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli)));
		prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
	}
	/*
	 * Check endianness on subsequent packets.
	 */
	if (state->incoming_frag.bigendian_data
	    != state->incoming_pdu.bigendian_data) {
		DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
			 "%s\n",
			 state->incoming_pdu.bigendian_data?"big":"little",
			 state->incoming_frag.bigendian_data?"big":"little"));
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	/* Now copy the data portion out of the pdu into rbuf. */
	if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return;
	}

	memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
	       rdata, (size_t)rdata_len);
	state->incoming_pdu_offset += rdata_len;

	/* Discard the consumed fragment, keeping any extra bytes already
	   received for the next one. */
	status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
					    &state->incoming_frag);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (state->rhdr.flags & RPC_FLG_LAST) {
		DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  (unsigned)prs_data_size(&state->incoming_pdu)));
		tevent_req_done(req);
		return;
	}

	/* More fragments to come - loop back for the next one. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
}
1441
/*
 * Collect the result of rpc_api_pipe_send. On success *reply_pdu takes over
 * the assembled reply buffer; its memory is re-parented to mem_ctx.
 */
static NTSTATUS rpc_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
				  prs_struct *reply_pdu)
{
	struct rpc_api_pipe_state *state = tevent_req_data(
		req, struct rpc_api_pipe_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	/* Struct-copy the prs into the caller's reply_pdu. */
	*reply_pdu = state->incoming_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->incoming_pdu from being freed in
	 * rpc_api_pipe_state_destructor()
	 */
	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
1464
1465 /*******************************************************************
1466  Creates krb5 auth bind.
1467  ********************************************************************/
1468
static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
						enum pipe_auth_level auth_level,
						RPC_HDR_AUTH *pauth_out,
						prs_struct *auth_data)
{
#ifdef HAVE_KRB5
	int ret;
	struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
	DATA_BLOB tkt = data_blob_null;
	DATA_BLOB tkt_wrapped = data_blob_null;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);

	DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
		a->service_principal ));

	/* Create the ticket for the service principal and return it in a gss-api wrapped blob. */

	ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
			&a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);

	/* cli_krb5_get_ticket returns a krb5 error code; nonzero is failure. */
	if (ret) {
		DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
			"failed with %s\n",
			a->service_principal,
			error_message(ret) ));

		/* Clean up both the ticket blob and the caller's prs buffer
		   before bailing out. */
		data_blob_free(&tkt);
		prs_mem_free(auth_data);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* wrap that up in a nice GSS-API wrapping */
	tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);

	/* The raw ticket is no longer needed once wrapped. */
	data_blob_free(&tkt);

	/* Auth len in the rpc header doesn't include auth_header. */
	if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
		data_blob_free(&tkt_wrapped);
		prs_mem_free(auth_data);
		return NT_STATUS_NO_MEMORY;
	}

	DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
	dump_data(5, tkt_wrapped.data, tkt_wrapped.length);

	data_blob_free(&tkt_wrapped);
	return NT_STATUS_OK;
#else
	/* Built without Kerberos support - krb5 binds cannot be created. */
	return NT_STATUS_INVALID_PARAMETER;
#endif
}
1523
1524 /*******************************************************************
1525  Creates SPNEGO NTLMSSP auth bind.
1526  ********************************************************************/
1527
1528 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1529                                                 enum pipe_auth_level auth_level,
1530                                                 RPC_HDR_AUTH *pauth_out,
1531                                                 prs_struct *auth_data)
1532 {
1533         NTSTATUS nt_status;
1534         DATA_BLOB null_blob = data_blob_null;
1535         DATA_BLOB request = data_blob_null;
1536         DATA_BLOB spnego_msg = data_blob_null;
1537
1538         /* We may change the pad length before marshalling. */
1539         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1540
1541         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1542         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1543                                         null_blob,
1544                                         &request);
1545
1546         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1547                 data_blob_free(&request);
1548                 prs_mem_free(auth_data);
1549                 return nt_status;
1550         }
1551
1552         /* Wrap this in SPNEGO. */
1553         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1554
1555         data_blob_free(&request);
1556
1557         /* Auth len in the rpc header doesn't include auth_header. */
1558         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1559                 data_blob_free(&spnego_msg);
1560                 prs_mem_free(auth_data);
1561                 return NT_STATUS_NO_MEMORY;
1562         }
1563
1564         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1565         dump_data(5, spnego_msg.data, spnego_msg.length);
1566
1567         data_blob_free(&spnego_msg);
1568         return NT_STATUS_OK;
1569 }
1570
1571 /*******************************************************************
1572  Creates NTLMSSP auth bind.
1573  ********************************************************************/
1574
1575 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1576                                                 enum pipe_auth_level auth_level,
1577                                                 RPC_HDR_AUTH *pauth_out,
1578                                                 prs_struct *auth_data)
1579 {
1580         NTSTATUS nt_status;
1581         DATA_BLOB null_blob = data_blob_null;
1582         DATA_BLOB request = data_blob_null;
1583
1584         /* We may change the pad length before marshalling. */
1585         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1586
1587         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1588         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1589                                         null_blob,
1590                                         &request);
1591
1592         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1593                 data_blob_free(&request);
1594                 prs_mem_free(auth_data);
1595                 return nt_status;
1596         }
1597
1598         /* Auth len in the rpc header doesn't include auth_header. */
1599         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1600                 data_blob_free(&request);
1601                 prs_mem_free(auth_data);
1602                 return NT_STATUS_NO_MEMORY;
1603         }
1604
1605         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1606         dump_data(5, request.data, request.length);
1607
1608         data_blob_free(&request);
1609         return NT_STATUS_OK;
1610 }
1611
1612 /*******************************************************************
1613  Creates schannel auth bind.
1614  ********************************************************************/
1615
1616 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1617                                                 enum pipe_auth_level auth_level,
1618                                                 RPC_HDR_AUTH *pauth_out,
1619                                                 prs_struct *auth_data)
1620 {
1621         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1622
1623         /* We may change the pad length before marshalling. */
1624         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1625
1626         /* Use lp_workgroup() if domain not specified */
1627
1628         if (!cli->auth->domain || !cli->auth->domain[0]) {
1629                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1630                 if (cli->auth->domain == NULL) {
1631                         return NT_STATUS_NO_MEMORY;
1632                 }
1633         }
1634
1635         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1636                                    global_myname());
1637
1638         /*
1639          * Now marshall the data into the auth parse_struct.
1640          */
1641
1642         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1643                                        &schannel_neg, auth_data, 0)) {
1644                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1645                 prs_mem_free(auth_data);
1646                 return NT_STATUS_NO_MEMORY;
1647         }
1648
1649         return NT_STATUS_OK;
1650 }
1651
1652 /*******************************************************************
1653  Creates the internals of a DCE/RPC bind request or alter context PDU.
1654  ********************************************************************/
1655
/*
 * Marshall the body of a bind or alter-context PDU into rpc_out:
 * RPC header, then the bind-request header carrying one presentation
 * context (the given abstract/transfer syntaxes) and finally, when
 * pauth_info holds data, the sign/seal padding, auth header and auth
 * payload.  phdr_auth->auth_pad_len is updated with any padding added
 * here so the header marshalled below stays consistent.
 */
static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
						prs_struct *rpc_out, 
						uint32 rpc_call_id,
						const RPC_IFACE *abstract,
						const RPC_IFACE *transfer,
						RPC_HDR_AUTH *phdr_auth,
						prs_struct *pauth_info)
{
	RPC_HDR hdr;
	RPC_HDR_RB hdr_rb;
	RPC_CONTEXT rpc_ctx;
	/* Auth payload length == amount already marshalled into pauth_info. */
	uint16 auth_len = prs_offset(pauth_info);
	uint8 ss_padding_len = 0;
	uint16 frag_len = 0;

	/* create the RPC context. */
	init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);

	/* create the bind request RPC_HDR_RB */
	init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);

	/* Start building the frag length. */
	frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);

	/* Do we need to pad ? Auth data is aligned to an 8-byte boundary. */
	if (auth_len) {
		uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
		if (data_len % 8) {
			ss_padding_len = 8 - (data_len % 8);
			phdr_auth->auth_pad_len = ss_padding_len;
		}
		frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
	}

	/* Create the request RPC_HDR */
	init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);

	/* Marshall the RPC header */
	if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Marshall the bind request data */
	if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Grow the outgoing buffer to store any auth info.
	 */

	if(auth_len != 0) {
		if (ss_padding_len) {
			/* Zero padding up to the 8-byte boundary computed above. */
			char pad[8];
			memset(pad, '\0', 8);
			if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
				DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
				return NT_STATUS_NO_MEMORY;
			}
		}

		if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
			return NT_STATUS_NO_MEMORY;
		}


		if(!prs_append_prs_data( rpc_out, pauth_info)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
			return NT_STATUS_NO_MEMORY;
		}
	}

	return NT_STATUS_OK;
}
1733
1734 /*******************************************************************
1735  Creates a DCE/RPC bind request.
1736  ********************************************************************/
1737
1738 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1739                                 prs_struct *rpc_out, 
1740                                 uint32 rpc_call_id,
1741                                 const RPC_IFACE *abstract,
1742                                 const RPC_IFACE *transfer,
1743                                 enum pipe_auth_type auth_type,
1744                                 enum pipe_auth_level auth_level)
1745 {
1746         RPC_HDR_AUTH hdr_auth;
1747         prs_struct auth_info;
1748         NTSTATUS ret = NT_STATUS_OK;
1749
1750         ZERO_STRUCT(hdr_auth);
1751         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1752                 return NT_STATUS_NO_MEMORY;
1753
1754         switch (auth_type) {
1755                 case PIPE_AUTH_TYPE_SCHANNEL:
1756                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1757                         if (!NT_STATUS_IS_OK(ret)) {
1758                                 prs_mem_free(&auth_info);
1759                                 return ret;
1760                         }
1761                         break;
1762
1763                 case PIPE_AUTH_TYPE_NTLMSSP:
1764                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1765                         if (!NT_STATUS_IS_OK(ret)) {
1766                                 prs_mem_free(&auth_info);
1767                                 return ret;
1768                         }
1769                         break;
1770
1771                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1772                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1773                         if (!NT_STATUS_IS_OK(ret)) {
1774                                 prs_mem_free(&auth_info);
1775                                 return ret;
1776                         }
1777                         break;
1778
1779                 case PIPE_AUTH_TYPE_KRB5:
1780                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1781                         if (!NT_STATUS_IS_OK(ret)) {
1782                                 prs_mem_free(&auth_info);
1783                                 return ret;
1784                         }
1785                         break;
1786
1787                 case PIPE_AUTH_TYPE_NONE:
1788                         break;
1789
1790                 default:
1791                         /* "Can't" happen. */
1792                         return NT_STATUS_INVALID_INFO_CLASS;
1793         }
1794
1795         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1796                                                 rpc_out, 
1797                                                 rpc_call_id,
1798                                                 abstract,
1799                                                 transfer,
1800                                                 &hdr_auth,
1801                                                 &auth_info);
1802
1803         prs_mem_free(&auth_info);
1804         return ret;
1805 }
1806
1807 /*******************************************************************
1808  Create and add the NTLMSSP sign/seal auth header and data.
1809  ********************************************************************/
1810
/*
 * Append the NTLMSSP auth footer to an outgoing request PDU: marshall
 * the auth header, then sign (INTEGRITY) or seal (PRIVACY) the data
 * portion of the PDU in place and append the resulting signature blob.
 * The data portion starts after the RPC and response headers; phdr is
 * unused here but kept for signature parity with the schannel variant.
 */
static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
					RPC_HDR *phdr,
					uint32 ss_padding_len,
					prs_struct *outgoing_pdu)
{
	RPC_HDR_AUTH auth_info;
	NTSTATUS status;
	DATA_BLOB auth_blob = data_blob_null;
	/* Everything marshalled after the RPC + response headers: data plus ss padding. */
	uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;

	if (!cli->auth->a_u.ntlmssp_state) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Init and marshall the auth header. */
	init_rpc_hdr_auth(&auth_info,
			map_pipe_auth_type_to_rpc_auth_type(
				cli->auth->auth_type),
			cli->auth->auth_level,
			ss_padding_len,
			1 /* context id. */);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Data portion is encrypted. The whole PDU (including
			 * headers) is passed as the "to sign" region. */
			status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		case PIPE_AUTH_LEVEL_INTEGRITY:
			/* Data is signed. */
			status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		default:
			/* Can't happen. */
			smb_panic("bad auth level");
			/* Notreached. */
			return NT_STATUS_INVALID_PARAMETER;
	}

	/* Finally marshall the blob. Only NTLMSSP_SIG_SIZE bytes are
	 * appended - presumably auth_blob is exactly that long; verify
	 * against the ntlmssp_sign/seal implementation. */

	if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
			(unsigned int)NTLMSSP_SIG_SIZE));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	data_blob_free(&auth_blob);
	return NT_STATUS_OK;
}
1887
1888 /*******************************************************************
1889  Create and add the schannel sign/seal auth header and data.
1890  ********************************************************************/
1891
1892 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1893                                         RPC_HDR *phdr,
1894                                         uint32 ss_padding_len,
1895                                         prs_struct *outgoing_pdu)
1896 {
1897         RPC_HDR_AUTH auth_info;
1898         RPC_AUTH_SCHANNEL_CHK verf;
1899         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1900         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1901         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1902
1903         if (!sas) {
1904                 return NT_STATUS_INVALID_PARAMETER;
1905         }
1906
1907         /* Init and marshall the auth header. */
1908         init_rpc_hdr_auth(&auth_info,
1909                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1910                         cli->auth->auth_level,
1911                         ss_padding_len,
1912                         1 /* context id. */);
1913
1914         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1915                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1916                 return NT_STATUS_NO_MEMORY;
1917         }
1918
1919         switch (cli->auth->auth_level) {
1920                 case PIPE_AUTH_LEVEL_PRIVACY:
1921                 case PIPE_AUTH_LEVEL_INTEGRITY:
1922                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1923                                 sas->seq_num));
1924
1925                         schannel_encode(sas,
1926                                         cli->auth->auth_level,
1927                                         SENDER_IS_INITIATOR,
1928                                         &verf,
1929                                         data_p,
1930                                         data_and_pad_len);
1931
1932                         sas->seq_num++;
1933                         break;
1934
1935                 default:
1936                         /* Can't happen. */
1937                         smb_panic("bad auth level");
1938                         /* Notreached. */
1939                         return NT_STATUS_INVALID_PARAMETER;
1940         }
1941
1942         /* Finally marshall the blob. */
1943         smb_io_rpc_auth_schannel_chk("",
1944                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1945                         &verf,
1946                         outgoing_pdu,
1947                         0);
1948
1949         return NT_STATUS_OK;
1950 }
1951
1952 /*******************************************************************
1953  Calculate how much data we're going to send in this packet, also
1954  work out any sign/seal padding length.
1955  ********************************************************************/
1956
/*
 * Work out how many bytes of NDR data fit into the next request
 * fragment, given the transport's max fragment size and the space the
 * current auth level's header/footer will consume.  Outputs the total
 * fragment length, the auth trailer length and any sign/seal padding
 * needed to align the data portion to 8 bytes.  Returns the number of
 * data bytes to send this time.
 */
static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
					uint32 data_left,
					uint16 *p_frag_len,
					uint16 *p_auth_len,
					uint32 *p_ss_padding)
{
	uint32 data_space, data_len;

#ifdef DEVELOPER
	/* Randomly shrink the payload in developer builds - presumably to
	 * exercise the multi-fragment code paths; confirm intent. */
	if ((data_left > 0) && (sys_random() % 2)) {
		data_left = MAX(data_left/2, 1);
	}
#endif

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_NONE:
		case PIPE_AUTH_LEVEL_CONNECT:
			/* No auth trailer: only the two fixed headers eat space. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			*p_auth_len = 0;
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
			return data_len;

		case PIPE_AUTH_LEVEL_INTEGRITY:
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Treat the same for all authenticated rpc requests. */
			switch(cli->auth->auth_type) {
				case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
				case PIPE_AUTH_TYPE_NTLMSSP:
					*p_auth_len = NTLMSSP_SIG_SIZE;
					break;
				case PIPE_AUTH_TYPE_SCHANNEL:
					*p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
					break;
				default:
					smb_panic("bad auth type");
					break;
			}

			/* Headers, auth header and auth footer all reduce the payload. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
						RPC_HDR_AUTH_LEN - *p_auth_len;

			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			/* Sign/seal requires the data portion padded to 8 bytes. */
			if (data_len % 8) {
				*p_ss_padding = 8 - (data_len % 8);
			}
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +		/* Normal headers. */
					data_len + *p_ss_padding +		/* data plus padding. */
					RPC_HDR_AUTH_LEN + *p_auth_len;		/* Auth header and auth data. */
			return data_len;

		default:
			smb_panic("bad auth level");
			/* Notreached. */
			return 0;
	}
}
2016
2017 /*******************************************************************
2018  External interface.
2019  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2020  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2021  and deals with signing/sealing details.
2022  ********************************************************************/
2023
/*
 * Async state for rpc_api_pipe_req_send: streams a marshalled request
 * out in max_xmit_frag-sized fragments and collects the response PDU.
 */
struct rpc_api_pipe_req_state {
	struct event_context *ev;	/* Event context driving the sub-requests. */
	struct rpc_pipe_client *cli;	/* Pipe the request is issued on. */
	uint8_t op_num;			/* RPC operation number being called. */
	uint32_t call_id;		/* Call id stamped on every fragment. */
	prs_struct *req_data;		/* Caller-owned marshalled request data. */
	uint32_t req_data_sent;		/* Bytes of req_data already sent. */
	prs_struct outgoing_frag;	/* Scratch buffer for the fragment in flight. */
	prs_struct reply_pdu;		/* Assembled response, handed to _recv. */
};
2034
2035 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2036 {
2037         prs_mem_free(&s->outgoing_frag);
2038         prs_mem_free(&s->reply_pdu);
2039         return 0;
2040 }
2041
2042 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2043 static void rpc_api_pipe_req_done(struct tevent_req *subreq);
2044 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2045                                   bool *is_last_frag);
2046
/*
 * Start an async RPC request on a pipe.  The marshalled NDR request in
 * req_data (caller-owned, must stay alive for the request's lifetime)
 * is split into fragments no larger than the server's max_xmit_frag.
 * The first fragment is prepared synchronously here; if it is also the
 * last, we go straight to rpc_api_pipe_send (write + read response),
 * otherwise we only write it and continue in the write-done callback.
 * Returns a tevent_req, or NULL on out-of-memory.
 */
struct tevent_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct rpc_pipe_client *cli,
					 uint8_t op_num,
					 prs_struct *req_data)
{
	struct tevent_req *req, *subreq;
	struct rpc_api_pipe_req_state *state;
	NTSTATUS status;
	bool is_last_frag;

	req = tevent_req_create(mem_ctx, &state,
				struct rpc_api_pipe_req_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->op_num = op_num;
	state->req_data = req_data;
	state->req_data_sent = 0;
	state->call_id = get_rpc_call_id();

	if (cli->max_xmit_frag
	    < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
		/* Server is screwed up ! */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
		      state, MARSHALL)) {
		/* fail: returns NULL, which callers treat as no-mem. */
		goto fail;
	}

	/* From here on the prs buffers are cleaned up with the state. */
	talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	if (is_last_frag) {
		/* Single-fragment request: send it and wait for the response. */
		subreq = rpc_api_pipe_send(state, ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
	} else {
		/* More fragments follow: just write this one; the write-done
		 * callback prepares the next. */
		subreq = rpc_write_send(
			state, ev, cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
2119
/*
 * Marshall the next request fragment into state->outgoing_frag:
 * RPC header (FIRST on the initial fragment, LAST on the final one),
 * request header, the next slice of req_data, sign/seal padding and the
 * auth footer required by the pipe's auth type.  Advances
 * state->req_data_sent and reports via *is_last_frag whether this
 * fragment completes the request.
 */
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	RPC_HDR hdr;
	RPC_HDR_REQ hdr_req;
	uint32_t data_sent_thistime;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	uint32_t ss_padding;
	uint32_t data_left;
	char pad[8] = { 0, };
	NTSTATUS status;

	data_left = prs_offset(state->req_data) - state->req_data_sent;

	data_sent_thistime = calculate_data_len_tosend(
		state->cli, data_left, &frag_len, &auth_len, &ss_padding);

	if (state->req_data_sent == 0) {
		flags = RPC_FLG_FIRST;
	}

	if (data_sent_thistime == data_left) {
		flags |= RPC_FLG_LAST;
	}

	/* Reuse the fragment buffer: rewind before marshalling. */
	if (!prs_set_offset(&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create and marshall the header and request header. */
	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
		     auth_len);

	if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the rpc request RPC_HDR_REQ. The alloc hint is the full
	 * request size, not just this fragment's. */
	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
			 state->op_num);

	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
				&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy in the data, plus any ss padding. */
	if (!prs_append_some_prs_data(&state->outgoing_frag,
				      state->req_data, state->req_data_sent,
				      data_sent_thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy the sign/seal padding data. */
	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Generate any auth sign/seal and add the auth footer. */
	switch (state->cli->auth->auth_type) {
	case PIPE_AUTH_TYPE_NONE:
		status = NT_STATUS_OK;
		break;
	case PIPE_AUTH_TYPE_NTLMSSP:
	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
						 &state->outgoing_frag);
		break;
	case PIPE_AUTH_TYPE_SCHANNEL:
		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
						  &state->outgoing_frag);
		break;
	default:
		status = NT_STATUS_INVALID_PARAMETER;
		break;
	}

	state->req_data_sent += data_sent_thistime;
	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);

	return status;
}
2204
/*
 * Called when a non-final fragment has been written.  Prepares the next
 * fragment and either writes it (more to come, re-entering this
 * callback) or, for the last fragment, hands off to rpc_api_pipe_send
 * which also reads the response.  Mirrors the dispatch logic in
 * rpc_api_pipe_req_send.
 */
static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;
	bool is_last_frag;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (is_last_frag) {
		/* Final fragment: send it and collect the response PDU. */
		subreq = rpc_api_pipe_send(state, state->ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
	} else {
		/* More fragments pending: write this one and loop. */
		subreq = rpc_write_send(
			state, state->ev,
			state->cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
}
2248
2249 static void rpc_api_pipe_req_done(struct tevent_req *subreq)
2250 {
2251         struct tevent_req *req = tevent_req_callback_data(
2252                 subreq, struct tevent_req);
2253         struct rpc_api_pipe_req_state *state = tevent_req_data(
2254                 req, struct rpc_api_pipe_req_state);
2255         NTSTATUS status;
2256
2257         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2258         TALLOC_FREE(subreq);
2259         if (!NT_STATUS_IS_OK(status)) {
2260                 tevent_req_nterror(req, status);
2261                 return;
2262         }
2263         tevent_req_done(req);
2264 }
2265
/*
 * Receive function for rpc_api_pipe_req_send: hand the reply PDU to the
 * caller. On error, reply_pdu is still initialized (empty), because the
 * rpccli_* callers unconditionally free it.
 */
NTSTATUS rpc_api_pipe_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
                               prs_struct *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		/*
		 * We always have to initialize to reply pdu, even if there is
		 * none. The rpccli_* caller routines expect this.
		 */
		prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
		return status;
	}

	/* Transfer ownership of the PDU buffer to the caller's context. */
	*reply_pdu = state->reply_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->reply_pdu (now owned by the caller via *reply_pdu)
	 * from being freed in rpc_api_pipe_req_state_destructor()
	 */
	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
2293
2294 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2295                         uint8 op_num,
2296                         prs_struct *in_data,
2297                         prs_struct *out_data)
2298 {
2299         TALLOC_CTX *frame = talloc_stackframe();
2300         struct event_context *ev;
2301         struct tevent_req *req;
2302         NTSTATUS status = NT_STATUS_NO_MEMORY;
2303
2304         ev = event_context_init(frame);
2305         if (ev == NULL) {
2306                 goto fail;
2307         }
2308
2309         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2310         if (req == NULL) {
2311                 goto fail;
2312         }
2313
2314         tevent_req_poll(req, ev);
2315
2316         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2317  fail:
2318         TALLOC_FREE(frame);
2319         return status;
2320 }
2321
#if 0
/* NOTE(review): dead code, disabled via #if 0 — kept for reference only. */
/****************************************************************************
 Set the handle state.
****************************************************************************/

static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
                                   const char *pipe_name, uint16 device_state)
{
	bool state_set = False;
	char param[2];
	uint16 setup[2]; /* only need 2 uint16 setup parameters */
	char *rparam = NULL;
	char *rdata = NULL;
	uint32 rparam_len, rdata_len;

	if (pipe_name == NULL)
		return False;

	DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
		 cli->fnum, pipe_name, device_state));

	/* create parameters: device state */
	SSVAL(param, 0, device_state);

	/* create setup parameters. */
	setup[0] = 0x0001; 
	setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */

	/* send the data on \PIPE\ */
	if (cli_api_pipe(cli->cli, "\\PIPE\\",
	            setup, 2, 0,                /* setup, length, max */
	            param, 2, 0,                /* param, length, max */
	            NULL, 0, 1024,              /* data, length, max */
	            &rparam, &rparam_len,        /* return param, length */
	            &rdata, &rdata_len))         /* return data, length */
	{
		DEBUG(5, ("Set Handle state: return OK\n"));
		state_set = True;
	}

	SAFE_FREE(rparam);
	SAFE_FREE(rdata);

	return state_set;
}
#endif
2368
2369 /****************************************************************************
2370  Check the rpc bind acknowledge response.
2371 ****************************************************************************/
2372
2373 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2374 {
2375         if ( hdr_ba->addr.len == 0) {
2376                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2377         }
2378
2379         /* check the transfer syntax */
2380         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2381              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2382                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2383                 return False;
2384         }
2385
2386         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2387                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2388                           hdr_ba->res.num_results, hdr_ba->res.reason));
2389         }
2390
2391         DEBUG(5,("check_bind_response: accepted!\n"));
2392         return True;
2393 }
2394
2395 /*******************************************************************
2396  Creates a DCE/RPC bind authentication response.
2397  This is the packet that is sent back to the server once we
2398  have received a BIND-ACK, to finish the third leg of
2399  the authentication handshake.
2400  ********************************************************************/
2401
static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
				uint32 rpc_call_id,
				enum pipe_auth_type auth_type,
				enum pipe_auth_level auth_level,
				DATA_BLOB *pauth_blob,
				prs_struct *rpc_out)
{
	RPC_HDR hdr;
	RPC_HDR_AUTH hdr_auth;
	uint32 pad = 0;

	/* Create the request RPC_HDR: AUTH3, single fragment, length covers
	 * header + 4-byte pad + auth header + the auth blob itself. */
	init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
		     RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
		     pauth_blob->length );

	/* Marshall it. */
	if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
		I'm puzzled about this - seems to violate the DCE RPC auth rules,
		about padding - shouldn't this pad to length 8 ? JRA.
	*/

	/* 4 bytes padding. */
	if (!prs_uint32("pad", rpc_out, 0, &pad)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the request RPC_HDR_AUTH (auth type mapped to its wire
	 * value, zero padding, auth context id 1). */
	init_rpc_hdr_auth(&hdr_auth,
			map_pipe_auth_type_to_rpc_auth_type(auth_type),
			auth_level, 0, 1);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Append the auth data to the outgoing buffer.
	 */

	if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	return NT_STATUS_OK;
}
2456
2457 /*******************************************************************
2458  Creates a DCE/RPC bind alter context authentication request which
2459  may contain a spnego auth blobl
2460  ********************************************************************/
2461
static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
					const RPC_IFACE *abstract,
					const RPC_IFACE *transfer,
					enum pipe_auth_level auth_level,
					const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
					prs_struct *rpc_out)
{
	RPC_HDR_AUTH hdr_auth;
	prs_struct auth_info;
	NTSTATUS ret = NT_STATUS_OK;

	ZERO_STRUCT(hdr_auth);
	/* Temporary parse struct for the auth trailer; freed on every exit. */
	if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
		return NT_STATUS_NO_MEMORY;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);

	if (pauth_blob->length) {
		if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
			prs_mem_free(&auth_info);
			return NT_STATUS_NO_MEMORY;
		}
	}

	/* Marshall the ALTER_CONTEXT pdu with the auth trailer appended. */
	ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
						rpc_out, 
						rpc_call_id,
						abstract,
						transfer,
						&hdr_auth,
						&auth_info);
	prs_mem_free(&auth_info);
	return ret;
}
2497
2498 /****************************************************************************
2499  Do an rpc bind.
2500 ****************************************************************************/
2501
/* Per-request state for the async rpc_pipe_bind_send machinery. */
struct rpc_pipe_bind_state {
	struct event_context *ev;	/* event context driving the bind */
	struct rpc_pipe_client *cli;	/* pipe being bound */
	prs_struct rpc_out;		/* marshalled outgoing PDU; freed in destructor */
	uint32_t rpc_call_id;		/* call id reused across all bind legs */
};
2508
/* Talloc destructor: release the marshalled PDU buffer with the state. */
static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
{
	prs_mem_free(&state->rpc_out);
	return 0;
}
2514
/* Forward declarations for the multi-leg bind state machine below. */
static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq);
static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
					   struct rpc_pipe_bind_state *state,
					   struct rpc_hdr_info *phdr,
					   prs_struct *reply_pdu);
static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu);
static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq);
2526
/*
 * Kick off an async DCE/RPC bind on cli using the given auth data.
 * Takes talloc ownership of *auth (moved onto cli). Returns NULL on
 * out-of-memory; other early errors are posted via async_post_ntstatus.
 */
struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
				     struct event_context *ev,
				     struct rpc_pipe_client *cli,
				     struct cli_pipe_auth_data *auth)
{
	struct async_req *result;
	struct tevent_req *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_pipe_bind_state)) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
		rpccli_pipe_txt(debug_ctx(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();

	/* rpc_out is grown during marshalling; destructor frees it. */
	prs_init_empty(&state->rpc_out, state, MARSHALL);
	talloc_set_destructor(state, rpc_pipe_bind_state_destructor);

	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(cli, &state->rpc_out,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     cli->auth->auth_type,
				     cli->auth->auth_level);

	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	/* Leg one: send the bind request, wait for the BIND-ACK. */
	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   RPC_BINDACK);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_pipe_bind_step_one_done, result);
	return result;

 post_status:
	if (async_post_ntstatus(result, ev, status)) {
		return result;
	}
 fail:
	TALLOC_FREE(result);
	return NULL;
}
2584
/*
 * BIND-ACK arrived: validate it, remember the negotiated fragment
 * sizes, and decide whether further authentication legs are needed.
 */
static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq)
{
	struct async_req *req = tevent_req_callback_data(
		subreq, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_ba_info hdr_ba;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  nt_errstr(status)));
		async_req_nterror(req, status);
		return;
	}

	/* Unmarshall the RPC header */
	if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
			  "RPC_HDR_BA.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
		DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Remember the fragment sizes the server negotiated. */
	state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
	state->cli->max_recv_frag = hdr_ba.bba.max_rsize;

	/*
	 * For authenticated binds we may need to do 3 or 4 leg binds.
	 */

	switch(state->cli->auth->auth_type) {

	case PIPE_AUTH_TYPE_NONE:
	case PIPE_AUTH_TYPE_SCHANNEL:
		/* Bind complete. */
		prs_mem_free(&reply_pdu);
		async_req_done(req);
		break;

	case PIPE_AUTH_TYPE_NTLMSSP:
		/* Need to send AUTH3 packet - no reply. */
		status = rpc_finish_auth3_bind_send(req, state, &hdr,
						    &reply_pdu);
		prs_mem_free(&reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_nterror(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		/* Need to send alter context request and reply. */
		status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
							     &reply_pdu);
		prs_mem_free(&reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_nterror(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_KRB5:
		/* KRB5 not implemented here - fall through to error. */

	default:
		DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
			 (unsigned int)state->cli->auth->auth_type));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
	}
}
2675
2676 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2677                                            struct rpc_pipe_bind_state *state,
2678                                            struct rpc_hdr_info *phdr,
2679                                            prs_struct *reply_pdu)
2680 {
2681         DATA_BLOB server_response = data_blob_null;
2682         DATA_BLOB client_reply = data_blob_null;
2683         struct rpc_hdr_auth_info hdr_auth;
2684         struct tevent_req *subreq;
2685         NTSTATUS status;
2686
2687         if ((phdr->auth_len == 0)
2688             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2689                 return NT_STATUS_INVALID_PARAMETER;
2690         }
2691
2692         if (!prs_set_offset(
2693                     reply_pdu,
2694                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2695                 return NT_STATUS_INVALID_PARAMETER;
2696         }
2697
2698         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2699                 return NT_STATUS_INVALID_PARAMETER;
2700         }
2701
2702         /* TODO - check auth_type/auth_level match. */
2703
2704         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2705         prs_copy_data_out((char *)server_response.data, reply_pdu,
2706                           phdr->auth_len);
2707
2708         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2709                                 server_response, &client_reply);
2710
2711         if (!NT_STATUS_IS_OK(status)) {
2712                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2713                           "blob failed: %s.\n", nt_errstr(status)));
2714                 return status;
2715         }
2716
2717         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2718
2719         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2720                                        state->cli->auth->auth_type,
2721                                        state->cli->auth->auth_level,
2722                                        &client_reply, &state->rpc_out);
2723         data_blob_free(&client_reply);
2724
2725         if (!NT_STATUS_IS_OK(status)) {
2726                 return status;
2727         }
2728
2729         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2730                                 (uint8_t *)prs_data_p(&state->rpc_out),
2731                                 prs_offset(&state->rpc_out));
2732         if (subreq == NULL) {
2733                 return NT_STATUS_NO_MEMORY;
2734         }
2735         tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2736         return NT_STATUS_OK;
2737 }
2738
2739 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2740 {
2741         struct async_req *req = tevent_req_callback_data(
2742                 subreq, struct async_req);
2743         NTSTATUS status;
2744
2745         status = rpc_write_recv(subreq);
2746         TALLOC_FREE(subreq);
2747         if (!NT_STATUS_IS_OK(status)) {
2748                 async_req_nterror(req, status);
2749                 return;
2750         }
2751         async_req_done(req);
2752 }
2753
/*
 * Third leg of a SPNEGO/NTLMSSP bind: unwrap the server's SPNEGO
 * challenge from the BIND-ACK, run ntlmssp_update over it, re-wrap the
 * reply in SPNEGO and send it as an ALTER_CONTEXT request (a fourth
 * leg, handled in rpc_bind_ntlmssp_api_done, receives the response).
 */
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu)
{
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB server_ntlm_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	RPC_HDR_AUTH hdr_auth;
	struct tevent_req *subreq;
	NTSTATUS status;

	if ((phdr->auth_len == 0)
	    || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Process the returned NTLMSSP blob first. */
	if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	server_spnego_response = data_blob(NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_spnego_response.data,
			  reply_pdu, phdr->auth_len);

	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
	if (!spnego_parse_challenge(server_spnego_response,
				    &server_ntlm_response, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&server_ntlm_response);
		data_blob_free(&tmp_blob);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* We're finished with the server spnego response and the tmp_blob. */
	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
				server_ntlm_response, &client_reply);

	/* Finished with the server_ntlm response */
	data_blob_free(&server_ntlm_response);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
			  "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}

	/* SPNEGO wrap the client reply. */
	tmp_blob = spnego_gen_auth(client_reply);
	data_blob_free(&client_reply);
	client_reply = tmp_blob;
	tmp_blob = data_blob_null;

	/* Now prepare the alter context pdu. */
	prs_init_empty(&state->rpc_out, state, MARSHALL);

	status = create_rpc_alter_context(state->rpc_call_id,
					  &state->cli->abstract_syntax,
					  &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Leg four: ALTER_CONTEXT round trip, reply handled in
	 * rpc_bind_ntlmssp_api_done. */
	subreq = rpc_api_pipe_send(state, state->ev, state->cli,
				   &state->rpc_out, RPC_ALTCONTRESP);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, rpc_bind_ntlmssp_api_done, req);
	return NT_STATUS_OK;
}
2845
/*
 * Fourth leg of a SPNEGO/NTLMSSP bind: the ALTER_CONTEXT response
 * arrived; pull out the SPNEGO auth blob and verify the server accepted
 * our authentication.
 */
static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq)
{
	struct async_req *req = tevent_req_callback_data(
		subreq, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_auth_info hdr_auth;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	/* Get the auth blob from the reply. */
	if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
			  "unmarshall RPC_HDR.\n"));
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Seek to the auth trailer at the end of the fragment. */
	if (!prs_set_offset(
		    &reply_pdu,
		    hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	server_spnego_response = data_blob(NULL, hdr.auth_len);
	prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
			  hdr.auth_len);

	/* Check we got a valid auth response. */
	if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
					OID_NTLMSSP, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&tmp_blob);
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
		 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
	async_req_done(req);
}
2906
/* Receive function for rpc_pipe_bind_send: just the final status. */
NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
{
	return async_req_simple_recv_ntstatus(req);
}
2911
/*
 * Synchronous wrapper around rpc_pipe_bind_send/recv: drive a private
 * event context until the async bind has finished.
 */
NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
		       struct cli_pipe_auth_data *auth)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct async_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = event_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}

	req = rpc_pipe_bind_send(frame, ev, cli, auth);
	if (req == NULL) {
		goto fail;
	}

	/* Pump the event loop until the request reaches a final state. */
	while (req->state < ASYNC_REQ_DONE) {
		event_loop_once(ev);
	}

	status = rpc_pipe_bind_recv(req);
 fail:
	TALLOC_FREE(frame);
	return status;
}
2939
2940 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2941                                 unsigned int timeout)
2942 {
2943         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2944
2945         if (cli == NULL) {
2946                 return 0;
2947         }
2948         return cli_set_timeout(cli, timeout);
2949 }
2950
2951 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2952 {
2953         struct cli_state *cli;
2954
2955         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2956             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2957                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2958                 return true;
2959         }
2960
2961         cli = rpc_pipe_np_smb_conn(rpc_cli);
2962         if (cli == NULL) {
2963                 return false;
2964         }
2965         E_md4hash(cli->password ? cli->password : "", nt_hash);
2966         return true;
2967 }
2968
2969 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2970                                struct cli_pipe_auth_data **presult)
2971 {
2972         struct cli_pipe_auth_data *result;
2973
2974         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2975         if (result == NULL) {
2976                 return NT_STATUS_NO_MEMORY;
2977         }
2978
2979         result->auth_type = PIPE_AUTH_TYPE_NONE;
2980         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2981
2982         result->user_name = talloc_strdup(result, "");
2983         result->domain = talloc_strdup(result, "");
2984         if ((result->user_name == NULL) || (result->domain == NULL)) {
2985                 TALLOC_FREE(result);
2986                 return NT_STATUS_NO_MEMORY;
2987         }
2988
2989         *presult = result;
2990         return NT_STATUS_OK;
2991 }
2992
/* Talloc destructor: tear down the embedded NTLMSSP state. */
static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
{
	ntlmssp_end(&auth->a_u.ntlmssp_state);
	return 0;
}
2998
/*
 * Build a cli_pipe_auth_data blob for an NTLMSSP (or SPNEGO-wrapped
 * NTLMSSP) authenticated bind.
 *
 * @param mem_ctx    talloc parent for the returned blob.
 * @param auth_type  PIPE_AUTH_TYPE_NTLMSSP or PIPE_AUTH_TYPE_SPNEGO_NTLMSSP
 *                   (stored as given; not validated here).
 * @param auth_level requested integrity/privacy level; mapped onto the
 *                   NTLMSSP SIGN/SEAL negotiate flags below.
 * @param domain     client domain, copied into the blob and the NTLMSSP
 *                   state.
 * @param username   client user name, likewise copied into both.
 * @param password   plaintext password handed to the NTLMSSP client state.
 * @param presult    receives the new blob on success.
 *
 * @return NT_STATUS_OK on success; NT_STATUS_NO_MEMORY or the error from
 *         the failing ntlmssp_* call otherwise.  On failure the partially
 *         built blob is freed.
 */
NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
				  enum pipe_auth_type auth_type,
				  enum pipe_auth_level auth_level,
				  const char *domain,
				  const char *username,
				  const char *password,
				  struct cli_pipe_auth_data **presult)
{
	struct cli_pipe_auth_data *result;
	NTSTATUS status;

	result = talloc(mem_ctx, struct cli_pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = auth_type;
	result->auth_level = auth_level;

	result->user_name = talloc_strdup(result, username);
	result->domain = talloc_strdup(result, domain);
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/*
	 * Install the destructor only now that ntlmssp_state exists, so a
	 * TALLOC_FREE on the earlier failure paths never calls ntlmssp_end()
	 * on uninitialized state.
	 */
	talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);

	status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/*
	 * Turn off sign+seal to allow selected auth level to turn it back on.
	 */
	result->a_u.ntlmssp_state->neg_flags &=
		~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);

	if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
		result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
	} else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
		result->a_u.ntlmssp_state->neg_flags
			|= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	/* Frees the NTLMSSP state too, via the destructor, once installed. */
	TALLOC_FREE(result);
	return status;
}
3067
3068 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3069                                    enum pipe_auth_level auth_level,
3070                                    const uint8_t sess_key[16],
3071                                    struct cli_pipe_auth_data **presult)
3072 {
3073         struct cli_pipe_auth_data *result;
3074
3075         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3076         if (result == NULL) {
3077                 return NT_STATUS_NO_MEMORY;
3078         }
3079
3080         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3081         result->auth_level = auth_level;
3082
3083         result->user_name = talloc_strdup(result, "");
3084         result->domain = talloc_strdup(result, domain);
3085         if ((result->user_name == NULL) || (result->domain == NULL)) {
3086                 goto fail;
3087         }
3088
3089         result->a_u.schannel_auth = talloc(result,
3090                                            struct schannel_auth_struct);
3091         if (result->a_u.schannel_auth == NULL) {
3092                 goto fail;
3093         }
3094
3095         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3096                sizeof(result->a_u.schannel_auth->sess_key));
3097         result->a_u.schannel_auth->seq_num = 0;
3098
3099         *presult = result;
3100         return NT_STATUS_OK;
3101
3102  fail:
3103         TALLOC_FREE(result);
3104         return NT_STATUS_NO_MEMORY;
3105 }
3106
#ifdef HAVE_KRB5
/*
 * Talloc destructor for the Kerberos auth state: release the session key
 * blob when the kerberos_auth_struct is freed.  Installed by
 * rpccli_kerberos_bind_data().
 */
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
{
	data_blob_free(&auth->session_key);
	return 0;
}
#endif
3114
3115 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3116                                    enum pipe_auth_level auth_level,
3117                                    const char *service_princ,
3118                                    const char *username,
3119                                    const char *password,
3120                                    struct cli_pipe_auth_data **presult)
3121 {
3122 #ifdef HAVE_KRB5
3123         struct cli_pipe_auth_data *result;
3124
3125         if ((username != NULL) && (password != NULL)) {
3126                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3127                 if (ret != 0) {
3128                         return NT_STATUS_ACCESS_DENIED;
3129                 }
3130         }
3131
3132         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3133         if (result == NULL) {
3134                 return NT_STATUS_NO_MEMORY;
3135         }
3136
3137         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3138         result->auth_level = auth_level;
3139
3140         /*
3141          * Username / domain need fixing!
3142          */
3143         result->user_name = talloc_strdup(result, "");
3144         result->domain = talloc_strdup(result, "");
3145         if ((result->user_name == NULL) || (result->domain == NULL)) {
3146                 goto fail;
3147         }
3148
3149         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3150                 result, struct kerberos_auth_struct);
3151         if (result->a_u.kerberos_auth == NULL) {
3152                 goto fail;
3153         }
3154         talloc_set_destructor(result->a_u.kerberos_auth,
3155                               cli_auth_kerberos_data_destructor);
3156
3157         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3158                 result, service_princ);
3159         if (result->a_u.kerberos_auth->service_principal == NULL) {
3160                 goto fail;