Convert rpc_write to tevent_req
[ira/wip.git] / source3 / rpc_client / cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
/* Mapping from well-known pipe name to the abstract interface syntax id. */
static const struct pipe_id_info {
        /* the names appear not to matter: the syntaxes _do_ matter */

        const char *client_pipe;       /* wire pipe name, e.g. "\\PIPE\\samr" */
        const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
} pipe_names [] =
{
        { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
        { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
        { PIPE_SAMR,            &ndr_table_samr.syntax_id },
        { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
        { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
        { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
        { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
        { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
        { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
        { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
        { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
        { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
        { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
        { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
        { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
        { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
        { NULL, NULL } /* sentinel terminating the table */
};
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
85 {
86         char *guid_str;
87         const char *result;
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
102         if (guid_str == NULL) {
103                 return NULL;
104         }
105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106                                  (int)interface->if_version);
107         TALLOC_FREE(guid_str);
108
109         if (result == NULL) {
110                 return "PIPE";
111         }
112         return result;
113 }
114
115 /********************************************************************
116  Map internal value to wire value.
117  ********************************************************************/
118
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
120 {
121         switch (auth_type) {
122
123         case PIPE_AUTH_TYPE_NONE:
124                 return RPC_ANONYMOUS_AUTH_TYPE;
125
126         case PIPE_AUTH_TYPE_NTLMSSP:
127                 return RPC_NTLMSSP_AUTH_TYPE;
128
129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131                 return RPC_SPNEGO_AUTH_TYPE;
132
133         case PIPE_AUTH_TYPE_SCHANNEL:
134                 return RPC_SCHANNEL_AUTH_TYPE;
135
136         case PIPE_AUTH_TYPE_KRB5:
137                 return RPC_KRB5_AUTH_TYPE;
138
139         default:
140                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
141                         "auth type %u\n",
142                         (unsigned int)auth_type ));
143                 break;
144         }
145         return -1;
146 }
147
148 /********************************************************************
149  Pipe description for a DEBUG
150  ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152                                    struct rpc_pipe_client *cli)
153 {
154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155         if (result == NULL) {
156                 return "pipe";
157         }
158         return result;
159 }
160
161 /********************************************************************
162  Rpc pipe call id.
163  ********************************************************************/
164
165 static uint32 get_rpc_call_id(void)
166 {
167         static uint32 call_id = 0;
168         return ++call_id;
169 }
170
171 /*
172  * Realloc pdu to have a least "size" bytes
173  */
174
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
176 {
177         size_t extra_size;
178
179         if (prs_data_size(pdu) >= size) {
180                 return true;
181         }
182
183         extra_size = size - prs_data_size(pdu);
184
185         if (!prs_force_grow(pdu, extra_size)) {
186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187                           "%d bytes.\n", (int)extra_size));
188                 return false;
189         }
190
191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192                   (int)extra_size, prs_data_size(pdu)));
193         return true;
194 }
195
196
197 /*******************************************************************
198  Use SMBreadX to get rest of one fragment's worth of rpc data.
199  Reads the whole size or give an error message
200  ********************************************************************/
201
/* State for rpc_read_send/recv: read exactly "size" bytes into "data". */
struct rpc_read_state {
        struct event_context *ev;               /* event context driving the reads */
        struct rpc_cli_transport *transport;    /* transport supplying read_send/read_recv */
        uint8_t *data;                          /* caller-owned destination buffer */
        size_t size;                            /* total number of bytes to read */
        size_t num_read;                        /* bytes received so far */
};
209
210 static void rpc_read_done(struct async_req *subreq);
211
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213                                         struct event_context *ev,
214                                         struct rpc_cli_transport *transport,
215                                         uint8_t *data, size_t size)
216 {
217         struct tevent_req *req;
218         struct async_req *subreq;
219         struct rpc_read_state *state;
220
221         req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
222         if (req == NULL) {
223                 return NULL;
224         }
225         state->ev = ev;
226         state->transport = transport;
227         state->data = data;
228         state->size = size;
229         state->num_read = 0;
230
231         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
232
233         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
234                                       transport->priv);
235         if (subreq == NULL) {
236                 goto fail;
237         }
238         subreq->async.fn = rpc_read_done;
239         subreq->async.priv = req;
240         return req;
241
242  fail:
243         TALLOC_FREE(req);
244         return NULL;
245 }
246
/*
 * Completion callback for one transport read.  Accumulates the received
 * byte count and either finishes the request (all bytes in) or issues the
 * next partial read for the remainder.
 */
static void rpc_read_done(struct async_req *subreq)
{
        /* subreq->async.priv carries the parent tevent_req (legacy wiring). */
        struct tevent_req *req = talloc_get_type_abort(
                subreq->async.priv, struct tevent_req);
        struct rpc_read_state *state = tevent_req_data(
                req, struct rpc_read_state);
        NTSTATUS status;
        ssize_t received;

        status = state->transport->read_recv(subreq, &received);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }

        state->num_read += received;
        if (state->num_read == state->size) {
                /* Full requested amount read; we are done. */
                tevent_req_done(req);
                return;
        }

        /* Short read: ask the transport for the remaining bytes. */
        subreq = state->transport->read_send(state, state->ev,
                                             state->data + state->num_read,
                                             state->size - state->num_read,
                                             state->transport->priv);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        subreq->async.fn = rpc_read_done;
        subreq->async.priv = req;
}
279
/* Report the final status of an rpc_read_send() request. */
static NTSTATUS rpc_read_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}
284
/* State for rpc_write_send/recv: write exactly "size" bytes from "data". */
struct rpc_write_state {
        struct event_context *ev;               /* event context driving the writes */
        struct rpc_cli_transport *transport;    /* transport supplying write_send/write_recv */
        const uint8_t *data;                    /* caller-owned source buffer */
        size_t size;                            /* total number of bytes to write */
        size_t num_written;                     /* bytes sent so far */
};
292
293 static void rpc_write_done(struct async_req *subreq);
294
295 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
296                                          struct event_context *ev,
297                                          struct rpc_cli_transport *transport,
298                                          const uint8_t *data, size_t size)
299 {
300         struct tevent_req *req;
301         struct async_req *subreq;
302         struct rpc_write_state *state;
303
304         req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
305         if (req == NULL) {
306                 return NULL;
307         }
308         state->ev = ev;
309         state->transport = transport;
310         state->data = data;
311         state->size = size;
312         state->num_written = 0;
313
314         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
315
316         subreq = transport->write_send(state, ev, data, size, transport->priv);
317         if (subreq == NULL) {
318                 goto fail;
319         }
320         subreq->async.fn = rpc_write_done;
321         subreq->async.priv = req;
322         return req;
323  fail:
324         TALLOC_FREE(req);
325         return NULL;
326 }
327
/*
 * Completion callback for one transport write.  Accumulates the written
 * byte count and either finishes the request (all bytes out) or issues
 * the next partial write for the remainder.
 */
static void rpc_write_done(struct async_req *subreq)
{
        /* subreq->async.priv carries the parent tevent_req (legacy wiring). */
        struct tevent_req *req = talloc_get_type_abort(
                subreq->async.priv, struct tevent_req);
        struct rpc_write_state *state = tevent_req_data(
                req, struct rpc_write_state);
        NTSTATUS status;
        ssize_t written;

        status = state->transport->write_recv(subreq, &written);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }

        state->num_written += written;

        if (state->num_written == state->size) {
                /* Full requested amount written; we are done. */
                tevent_req_done(req);
                return;
        }

        /* Short write: push the remaining bytes to the transport. */
        subreq = state->transport->write_send(state, state->ev,
                                              state->data + state->num_written,
                                              state->size - state->num_written,
                                              state->transport->priv);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        subreq->async.fn = rpc_write_done;
        subreq->async.priv = req;
}
361
/* Report the final status of an rpc_write_send() request. */
static NTSTATUS rpc_write_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}
366
367
368 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
369                                  struct rpc_hdr_info *prhdr,
370                                  prs_struct *pdu)
371 {
372         /*
373          * This next call sets the endian bit correctly in current_pdu. We
374          * will propagate this to rbuf later.
375          */
376
377         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
378                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
379                 return NT_STATUS_BUFFER_TOO_SMALL;
380         }
381
382         if (prhdr->frag_len > cli->max_recv_frag) {
383                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
384                           " we only allow %d\n", (int)prhdr->frag_len,
385                           (int)cli->max_recv_frag));
386                 return NT_STATUS_BUFFER_TOO_SMALL;
387         }
388
389         return NT_STATUS_OK;
390 }
391
392 /****************************************************************************
393  Try and get a PDU's worth of data from current_pdu. If not, then read more
394  from the wire.
395  ****************************************************************************/
396
/* State for get_complete_frag_send/recv: pull one whole fragment into pdu. */
struct get_complete_frag_state {
        struct event_context *ev;       /* event context driving the reads */
        struct rpc_pipe_client *cli;    /* pipe the fragment is read from */
        struct rpc_hdr_info *prhdr;     /* caller-owned header, filled during parse */
        prs_struct *pdu;                /* caller-owned buffer accumulating the fragment */
};
403
404 static void get_complete_frag_got_header(struct tevent_req *subreq);
405 static void get_complete_frag_got_rest(struct tevent_req *subreq);
406
/*
 * Ensure "pdu" contains one complete RPC fragment, reading more data from
 * the wire as needed.  Three cases, depending on how much of the fragment
 * is already buffered:
 *   1. less than a header: read the header first, then the rest
 *      (get_complete_frag_got_header continues);
 *   2. header present but fragment incomplete: read the remainder
 *      (get_complete_frag_got_rest continues);
 *   3. fragment already complete: post NT_STATUS_OK immediately.
 * NOTE(review): still returns the legacy async_req type; the subrequests
 * are already tevent_req, part of the ongoing conversion.
 */
static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
                                               struct event_context *ev,
                                               struct rpc_pipe_client *cli,
                                               struct rpc_hdr_info *prhdr,
                                               prs_struct *pdu)
{
        struct async_req *result;
        struct tevent_req *subreq;
        struct get_complete_frag_state *state;
        uint32_t pdu_len;
        NTSTATUS status;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct get_complete_frag_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->prhdr = prhdr;
        state->pdu = pdu;

        pdu_len = prs_data_size(pdu);
        if (pdu_len < RPC_HEADER_LEN) {
                /* Case 1: not even a full header buffered yet. */
                if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(
                        state, state->ev,
                        state->cli->transport,
                        (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
                        RPC_HEADER_LEN - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, get_complete_frag_got_header,
                                        result);
                return result;
        }

        status = parse_rpc_header(cli, prhdr, pdu);
        if (!NT_STATUS_IS_OK(status)) {
                goto post_status;
        }

        /*
         * Ensure we have frag_len bytes of data.
         */
        if (pdu_len < prhdr->frag_len) {
                /* Case 2: header parsed, body (partially) missing. */
                if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(state, state->ev,
                                       state->cli->transport,
                                       (uint8_t *)(prs_data_p(pdu) + pdu_len),
                                       prhdr->frag_len - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, get_complete_frag_got_rest,
                                        result);
                return result;
        }

        /* Case 3: the fragment is already fully buffered. */
        status = NT_STATUS_OK;
 post_status:
        /* Deliver the immediate result through the event loop. */
        if (async_post_ntstatus(result, ev, status)) {
                return result;
        }
        TALLOC_FREE(result);
        return NULL;
}
482
/*
 * The fragment header has arrived: parse it, grow the buffer to the full
 * fragment length and read the remaining body bytes.
 */
static void get_complete_frag_got_header(struct tevent_req *subreq)
{
        struct async_req *req = tevent_req_callback_data(
                subreq, struct async_req);
        struct get_complete_frag_state *state = talloc_get_type_abort(
                req->private_data, struct get_complete_frag_state);
        NTSTATUS status;

        status = rpc_read_recv(subreq);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_nterror(req, status);
                return;
        }

        /* Header is complete now; validate it and learn frag_len. */
        status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_nterror(req, status);
                return;
        }

        if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
                async_req_nterror(req, NT_STATUS_NO_MEMORY);
                return;
        }

        /*
         * We're here in this piece of code because we've read exactly
         * RPC_HEADER_LEN bytes into state->pdu.
         */

        subreq = rpc_read_send(
                state, state->ev, state->cli->transport,
                (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
                state->prhdr->frag_len - RPC_HEADER_LEN);
        if (async_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
}
523
524 static void get_complete_frag_got_rest(struct tevent_req *subreq)
525 {
526         struct async_req *req = tevent_req_callback_data(
527                 subreq, struct async_req);
528         NTSTATUS status;
529
530         status = rpc_read_recv(subreq);
531         TALLOC_FREE(subreq);
532         if (!NT_STATUS_IS_OK(status)) {
533                 async_req_nterror(req, status);
534                 return;
535         }
536         async_req_done(req);
537 }
538
/* Report the final status of a get_complete_frag_send() request. */
static NTSTATUS get_complete_frag_recv(struct async_req *req)
{
        return async_req_simple_recv_ntstatus(req);
}
543
544 /****************************************************************************
545  NTLMSSP specific sign/seal.
546  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
547  In fact I should probably abstract these into identical pieces of code... JRA.
548  ****************************************************************************/
549
/*
 * Verify (and for privacy level, decrypt in place) the NTLMSSP auth
 * trailer of a received PDU in current_pdu.  On success *p_ss_padding_len
 * receives the auth padding length so the caller can strip it from the
 * data stream.  The prs offset of current_pdu is restored before return.
 */
static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
                                prs_struct *current_pdu,
                                uint8 *p_ss_padding_len)
{
        RPC_HDR_AUTH auth_info;
        uint32 save_offset = prs_offset(current_pdu);   /* restored at the end */
        uint32 auth_len = prhdr->auth_len;
        NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
        unsigned char *data = NULL;
        size_t data_len;
        unsigned char *full_packet_data = NULL;
        size_t full_packet_data_len;
        DATA_BLOB auth_blob;
        NTSTATUS status;

        /* Nothing to verify below integrity level. */
        if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
            || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
                return NT_STATUS_OK;
        }

        if (!ntlmssp_state) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Ensure there's enough data for an authenticated response. */
        if ((auth_len > RPC_MAX_SIGN_SIZE) ||
                        (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
                DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
                        (unsigned int)auth_len ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /*
         * We need the full packet data + length (minus auth stuff) as well as the packet data + length
         * after the RPC header.
         * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
         * functions as NTLMv2 checks the rpc headers also.
         */

        data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
        data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);

        full_packet_data = (unsigned char *)prs_data_p(current_pdu);
        full_packet_data_len = prhdr->frag_len - auth_len;

        /* Pull the auth header and the following data into a blob. */
        if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
                DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
                        (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
                DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /* The auth blob sits right after the auth header we just parsed. */
        auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
        auth_blob.length = auth_len;

        switch (cli->auth->auth_level) {
                case PIPE_AUTH_LEVEL_PRIVACY:
                        /* Data is encrypted. */
                        status = ntlmssp_unseal_packet(ntlmssp_state,
                                                        data, data_len,
                                                        full_packet_data,
                                                        full_packet_data_len,
                                                        &auth_blob);
                        if (!NT_STATUS_IS_OK(status)) {
                                DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
                                        "packet from %s. Error was %s.\n",
                                        rpccli_pipe_txt(debug_ctx(), cli),
                                        nt_errstr(status) ));
                                return status;
                        }
                        break;
                case PIPE_AUTH_LEVEL_INTEGRITY:
                        /* Data is signed. */
                        status = ntlmssp_check_packet(ntlmssp_state,
                                                        data, data_len,
                                                        full_packet_data,
                                                        full_packet_data_len,
                                                        &auth_blob);
                        if (!NT_STATUS_IS_OK(status)) {
                                DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
                                        "packet from %s. Error was %s.\n",
                                        rpccli_pipe_txt(debug_ctx(), cli),
                                        nt_errstr(status) ));
                                return status;
                        }
                        break;
                default:
                        DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
                                  "auth level %d\n", cli->auth->auth_level));
                        return NT_STATUS_INVALID_INFO_CLASS;
        }

        /*
         * Return the current pointer to the data offset.
         */

        if(!prs_set_offset(current_pdu, save_offset)) {
                DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
                        (unsigned int)save_offset ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /*
         * Remember the padding length. We must remove it from the real data
         * stream once the sign/seal is done.
         */

        *p_ss_padding_len = auth_info.auth_pad_len;

        return NT_STATUS_OK;
}
666
667 /****************************************************************************
668  schannel specific sign/seal.
669  ****************************************************************************/
670
/*
 * Verify (and for privacy level, decrypt in place) the schannel auth
 * trailer of a received PDU in current_pdu.  On success *p_ss_padding_len
 * receives the auth padding length so the caller can strip it from the
 * data stream.  The prs offset of current_pdu is restored before return,
 * and the schannel sequence number is advanced.
 */
static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
                                prs_struct *current_pdu,
                                uint8 *p_ss_padding_len)
{
        RPC_HDR_AUTH auth_info;
        RPC_AUTH_SCHANNEL_CHK schannel_chk;
        uint32 auth_len = prhdr->auth_len;
        uint32 save_offset = prs_offset(current_pdu);   /* restored at the end */
        struct schannel_auth_struct *schannel_auth =
                cli->auth->a_u.schannel_auth;
        uint32 data_len;

        /* Nothing to verify below integrity level. */
        if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
            || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
                return NT_STATUS_OK;
        }

        /* Schannel trailers have a fixed size. */
        if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
                DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
                return NT_STATUS_INVALID_PARAMETER;
        }

        if (!schannel_auth) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Ensure there's enough data for an authenticated response. */
        if ((auth_len > RPC_MAX_SIGN_SIZE) ||
                        (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
                DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
                        (unsigned int)auth_len ));
                return NT_STATUS_INVALID_PARAMETER;
        }

        data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;

        /* Seek to the auth header that follows the payload. */
        if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
                DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
                        (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
                DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
                DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
                        auth_info.auth_type));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
                                &schannel_chk, current_pdu, 0)) {
                DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /* Check the signature / decrypt the payload in place. */
        if (!schannel_decode(schannel_auth,
                        cli->auth->auth_level,
                        SENDER_IS_ACCEPTOR,
                        &schannel_chk,
                        prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
                        data_len)) {
                DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
                                "Connection to %s.\n",
                                rpccli_pipe_txt(debug_ctx(), cli)));
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The sequence number gets incremented on both send and receive. */
        schannel_auth->seq_num++;

        /*
         * Return the current pointer to the data offset.
         */

        if(!prs_set_offset(current_pdu, save_offset)) {
                DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
                        (unsigned int)save_offset ));
                return NT_STATUS_BUFFER_TOO_SMALL;
        }

        /*
         * Remember the padding length. We must remove it from the real data
         * stream once the sign/seal is done.
         */

        *p_ss_padding_len = auth_info.auth_pad_len;

        return NT_STATUS_OK;
}
764
765 /****************************************************************************
766  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
767  ****************************************************************************/
768
769 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
770                                 prs_struct *current_pdu,
771                                 uint8 *p_ss_padding_len)
772 {
773         NTSTATUS ret = NT_STATUS_OK;
774
775         /* Paranioa checks for auth_len. */
776         if (prhdr->auth_len) {
777                 if (prhdr->auth_len > prhdr->frag_len) {
778                         return NT_STATUS_INVALID_PARAMETER;
779                 }
780
781                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
782                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
783                         /* Integer wrap attempt. */
784                         return NT_STATUS_INVALID_PARAMETER;
785                 }
786         }
787
788         /*
789          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
790          */
791
792         switch(cli->auth->auth_type) {
793                 case PIPE_AUTH_TYPE_NONE:
794                         if (prhdr->auth_len) {
795                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
796                                           "Connection to %s - got non-zero "
797                                           "auth len %u.\n",
798                                         rpccli_pipe_txt(debug_ctx(), cli),
799                                         (unsigned int)prhdr->auth_len ));
800                                 return NT_STATUS_INVALID_PARAMETER;
801                         }
802                         break;
803
804                 case PIPE_AUTH_TYPE_NTLMSSP:
805                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
806                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
807                         if (!NT_STATUS_IS_OK(ret)) {
808                                 return ret;
809                         }
810                         break;
811
812                 case PIPE_AUTH_TYPE_SCHANNEL:
813                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
814                         if (!NT_STATUS_IS_OK(ret)) {
815                                 return ret;
816                         }
817                         break;
818
819                 case PIPE_AUTH_TYPE_KRB5:
820                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
821                 default:
822                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
823                                   "to %s - unknown internal auth type %u.\n",
824                                   rpccli_pipe_txt(debug_ctx(), cli),
825                                   cli->auth->auth_type ));
826                         return NT_STATUS_INVALID_INFO_CLASS;
827         }
828
829         return NT_STATUS_OK;
830 }
831
832 /****************************************************************************
833  Do basic authentication checks on an incoming pdu.
834  ****************************************************************************/
835
static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
			prs_struct *current_pdu,
			uint8 expected_pkt_type,
			char **ppdata,
			uint32 *pdata_len,
			prs_struct *return_data)
{

	NTSTATUS ret = NT_STATUS_OK;
	uint32 current_pdu_len = prs_data_size(current_pdu);

	/* The caller must hand us exactly one complete fragment. */
	if (current_pdu_len != prhdr->frag_len) {
		DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
			(unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/*
	 * Point the return values at the real data including the RPC
	 * header. Just in case the caller wants it.
	 */
	*ppdata = prs_data_p(current_pdu);
	*pdata_len = current_pdu_len;

	/* Ensure we have the correct type. */
	switch (prhdr->pkt_type) {
		case RPC_ALTCONTRESP:
		case RPC_BINDACK:

			/* Alter context and bind ack share the same packet definitions. */
			break;


		case RPC_RESPONSE:
		{
			RPC_HDR_RESP rhdr_resp;
			uint8 ss_padding_len = 0;

			/* Note: unmarshalling also advances current_pdu's parse
			   offset past the response header. */
			if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			/* Here's where we deal with incoming sign/seal. */
			ret = cli_pipe_validate_rpc_response(cli, prhdr,
					current_pdu, &ss_padding_len);
			if (!NT_STATUS_IS_OK(ret)) {
				return ret;
			}

			/* Point the return values at the NDR data. Remember to remove any ss padding. */
			*ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;

			/* Guard the subtraction below against underflow. */
			if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			*pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;

			/* Remember to remove the auth footer. */
			if (prhdr->auth_len) {
				/* We've already done integer wrap tests on auth_len in
					cli_pipe_validate_rpc_response(). */
				if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
					return NT_STATUS_BUFFER_TOO_SMALL;
				}
				*pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
			}

			DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
				current_pdu_len, *pdata_len, ss_padding_len ));

			/*
			 * If this is the first reply, and the allocation hint is
			 * reasonable (capped at 15MB to avoid hostile hints),
			 * try and set up the return_data parse_struct to the
			 * correct size.
			 */

			if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
				if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
					DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
						"too large to allocate\n",
						(unsigned int)rhdr_resp.alloc_hint ));
					return NT_STATUS_NO_MEMORY;
				}
			}

			break;
		}

		case RPC_BINDNACK:
			DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
				  "received from %s!\n",
				  rpccli_pipe_txt(debug_ctx(), cli)));
			/* Use this for now... */
			return NT_STATUS_NETWORK_ACCESS_DENIED;

		case RPC_FAULT:
		{
			RPC_HDR_RESP rhdr_resp;
			RPC_HDR_FAULT fault_resp;

			/* A fault PDU carries a response header followed by a
			   fault header - unmarshal both in order. */
			if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
				  "code %s received from %s!\n",
				dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
				rpccli_pipe_txt(debug_ctx(), cli)));
			/* Never report success for a fault PDU. */
			if (NT_STATUS_IS_OK(fault_resp.status)) {
				return NT_STATUS_UNSUCCESSFUL;
			} else {
				return fault_resp.status;
			}
		}

		default:
			DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
				"from %s!\n",
				(unsigned int)prhdr->pkt_type,
				rpccli_pipe_txt(debug_ctx(), cli)));
			return NT_STATUS_INVALID_INFO_CLASS;
	}

	/* The PDU was well-formed, but is it the type the caller wanted? */
	if (prhdr->pkt_type != expected_pkt_type) {
		DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
			  "got an unexpected RPC packet type - %u, not %u\n",
			rpccli_pipe_txt(debug_ctx(), cli),
			prhdr->pkt_type,
			expected_pkt_type));
		return NT_STATUS_INVALID_INFO_CLASS;
	}

	/* Do this just before return - we don't want to modify any rpc header
	   data before now as we may have needed to do cryptographic actions on
	   it before. */

	if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
		DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
			"setting fragment first/last ON.\n"));
		prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
	}

	return NT_STATUS_OK;
}
987
988 /****************************************************************************
989  Ensure we eat the just processed pdu from the current_pdu prs_struct.
990  Normally the frag_len and buffer size will match, but on the first trans
991  reply there is a theoretical chance that buffer size > frag_len, so we must
992  deal with that.
993  ****************************************************************************/
994
995 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
996 {
997         uint32 current_pdu_len = prs_data_size(current_pdu);
998
999         if (current_pdu_len < prhdr->frag_len) {
1000                 return NT_STATUS_BUFFER_TOO_SMALL;
1001         }
1002
1003         /* Common case. */
1004         if (current_pdu_len == (uint32)prhdr->frag_len) {
1005                 prs_mem_free(current_pdu);
1006                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1007                 /* Make current_pdu dynamic with no memory. */
1008                 prs_give_memory(current_pdu, 0, 0, True);
1009                 return NT_STATUS_OK;
1010         }
1011
1012         /*
1013          * Oh no ! More data in buffer than we processed in current pdu.
1014          * Cheat. Move the data down and shrink the buffer.
1015          */
1016
1017         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1018                         current_pdu_len - prhdr->frag_len);
1019
1020         /* Remember to set the read offset back to zero. */
1021         prs_set_offset(current_pdu, 0);
1022
1023         /* Shrink the buffer. */
1024         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1025                 return NT_STATUS_BUFFER_TOO_SMALL;
1026         }
1027
1028         return NT_STATUS_OK;
1029 }
1030
1031 /****************************************************************************
1032  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1033 ****************************************************************************/
1034
/*
 * Per-request state for cli_api_pipe_send/recv: one write/read (or trans)
 * roundtrip on the RPC transport.
 */
struct cli_api_pipe_state {
	struct event_context *ev;
	struct rpc_cli_transport *transport;
	/* Reply bytes, talloc'ed off this state until handed to the caller. */
	uint8_t *rdata;
	uint32_t rdata_len;
};

static void cli_api_pipe_trans_done(struct async_req *subreq);
static void cli_api_pipe_write_done(struct tevent_req *subreq);
static void cli_api_pipe_read_done(struct async_req *subreq);
1045
1046 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1047                                            struct event_context *ev,
1048                                            struct rpc_cli_transport *transport,
1049                                            uint8_t *data, size_t data_len,
1050                                            uint32_t max_rdata_len)
1051 {
1052         struct async_req *result, *subreq;
1053         struct tevent_req *subreq2;
1054         struct cli_api_pipe_state *state;
1055         NTSTATUS status;
1056
1057         if (!async_req_setup(mem_ctx, &result, &state,
1058                              struct cli_api_pipe_state)) {
1059                 return NULL;
1060         }
1061         state->ev = ev;
1062         state->transport = transport;
1063
1064         if (max_rdata_len < RPC_HEADER_LEN) {
1065                 /*
1066                  * For a RPC reply we always need at least RPC_HEADER_LEN
1067                  * bytes. We check this here because we will receive
1068                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1069                  */
1070                 status = NT_STATUS_INVALID_PARAMETER;
1071                 goto post_status;
1072         }
1073
1074         if (transport->trans_send != NULL) {
1075                 subreq = transport->trans_send(state, ev, data, data_len,
1076                                                max_rdata_len, transport->priv);
1077                 if (subreq == NULL) {
1078                         status = NT_STATUS_NO_MEMORY;
1079                         goto post_status;
1080                 }
1081                 subreq->async.fn = cli_api_pipe_trans_done;
1082                 subreq->async.priv = result;
1083                 return result;
1084         }
1085
1086         /*
1087          * If the transport does not provide a "trans" routine, i.e. for
1088          * example the ncacn_ip_tcp transport, do the write/read step here.
1089          */
1090
1091         subreq2 = rpc_write_send(state, ev, transport, data, data_len);
1092         if (subreq2 == NULL) {
1093                 goto fail;
1094         }
1095         tevent_req_set_callback(subreq2, cli_api_pipe_write_done, result);
1096         return result;
1097
1098         status = NT_STATUS_INVALID_PARAMETER;
1099
1100  post_status:
1101         if (async_post_ntstatus(result, ev, status)) {
1102                 return result;
1103         }
1104  fail:
1105         TALLOC_FREE(result);
1106         return NULL;
1107 }
1108
1109 static void cli_api_pipe_trans_done(struct async_req *subreq)
1110 {
1111         struct async_req *req = talloc_get_type_abort(
1112                 subreq->async.priv, struct async_req);
1113         struct cli_api_pipe_state *state = talloc_get_type_abort(
1114                 req->private_data, struct cli_api_pipe_state);
1115         NTSTATUS status;
1116
1117         status = state->transport->trans_recv(subreq, state, &state->rdata,
1118                                               &state->rdata_len);
1119         TALLOC_FREE(subreq);
1120         if (!NT_STATUS_IS_OK(status)) {
1121                 async_req_nterror(req, status);
1122                 return;
1123         }
1124         async_req_done(req);
1125 }
1126
/* Called once rpc_write_send has pushed the request out. Starts reading
   the first RPC_HEADER_LEN bytes of the reply. */
static void cli_api_pipe_write_done(struct tevent_req *subreq)
{
	struct async_req *req = tevent_req_callback_data(
		subreq, struct async_req);
	struct cli_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct cli_api_pipe_state);
	struct async_req *subreq2;
	NTSTATUS status;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	/* Buffer for the first chunk of the reply: one RPC header. */
	state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
	if (async_req_nomem(state->rdata, req)) {
		return;
	}

	/*
	 * We don't need to use rpc_read_send here, the upper layer will cope
	 * with a short read, transport->trans_send could also return less
	 * than state->max_rdata_len.
	 */
	subreq2 = state->transport->read_send(state, state->ev, state->rdata,
					      RPC_HEADER_LEN,
					      state->transport->priv);
	if (async_req_nomem(subreq2, req)) {
		return;
	}
	subreq2->async.fn = cli_api_pipe_read_done;
	subreq2->async.priv = req;
}
1162
1163 static void cli_api_pipe_read_done(struct async_req *subreq)
1164 {
1165         struct async_req *req = talloc_get_type_abort(
1166                 subreq->async.priv, struct async_req);
1167         struct cli_api_pipe_state *state = talloc_get_type_abort(
1168                 req->private_data, struct cli_api_pipe_state);
1169         NTSTATUS status;
1170         ssize_t received;
1171
1172         status = state->transport->read_recv(subreq, &received);
1173         TALLOC_FREE(subreq);
1174         if (!NT_STATUS_IS_OK(status)) {
1175                 async_req_nterror(req, status);
1176                 return;
1177         }
1178         state->rdata_len = received;
1179         async_req_done(req);
1180 }
1181
1182 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1183                                   uint8_t **prdata, uint32_t *prdata_len)
1184 {
1185         struct cli_api_pipe_state *state = talloc_get_type_abort(
1186                 req->private_data, struct cli_api_pipe_state);
1187         NTSTATUS status;
1188
1189         if (async_req_is_nterror(req, &status)) {
1190                 return status;
1191         }
1192
1193         *prdata = talloc_move(mem_ctx, &state->rdata);
1194         *prdata_len = state->rdata_len;
1195         return NT_STATUS_OK;
1196 }
1197
1198 /****************************************************************************
1199  Send data on an rpc pipe via trans. The prs_struct data must be the last
1200  pdu fragment of an NDR data stream.
1201
1202  Receive response data from an rpc pipe, which may be large...
1203
1204  Read the first fragment: unfortunately have to use SMBtrans for the first
1205  bit, then SMBreadX for subsequent bits.
1206
1207  If first fragment received also wasn't the last fragment, continue
1208  getting fragments until we _do_ receive the last fragment.
1209
1210  Request/Response PDU's look like the following...
1211
1212  |<------------------PDU len----------------------------------------------->|
1213  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1214
1215  +------------+-----------------+-------------+---------------+-------------+
1216  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1217  +------------+-----------------+-------------+---------------+-------------+
1218
1219  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1220  signing & sealing being negotiated.
1221
1222  ****************************************************************************/
1223
/*
 * Per-request state for rpc_api_pipe_send/recv: reassembles a sequence of
 * reply fragments into one contiguous PDU stream.
 */
struct rpc_api_pipe_state {
	struct event_context *ev;
	struct rpc_pipe_client *cli;
	/* Packet type we expect in the reply (e.g. RPC_RESPONSE). */
	uint8_t expected_pkt_type;

	/* The fragment currently being received/validated. */
	prs_struct incoming_frag;
	struct rpc_hdr_info rhdr;

	prs_struct incoming_pdu;	/* Incoming reply */
	/* Write position within incoming_pdu for the next fragment's data. */
	uint32_t incoming_pdu_offset;
};
1235
1236 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1237 {
1238         prs_mem_free(&state->incoming_frag);
1239         prs_mem_free(&state->incoming_pdu);
1240         return 0;
1241 }
1242
static void rpc_api_pipe_trans_done(struct async_req *subreq);
static void rpc_api_pipe_got_pdu(struct async_req *subreq);

/*
 * Send one marshalled request PDU and collect the (possibly multi-fragment)
 * reply into a single reassembled PDU stream.
 */
static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct rpc_pipe_client *cli,
					   prs_struct *data, /* Outgoing PDU */
					   uint8_t expected_pkt_type)
{
	struct async_req *result, *subreq;
	struct rpc_api_pipe_state *state;
	uint16_t max_recv_frag;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_api_pipe_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->expected_pkt_type = expected_pkt_type;
	state->incoming_pdu_offset = 0;

	prs_init_empty(&state->incoming_frag, state, UNMARSHALL);

	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
	/* Make incoming_pdu dynamic with no memory. */
	prs_give_memory(&state->incoming_pdu, NULL, 0, true);

	/* The destructor frees the two prs_structs set up above. */
	talloc_set_destructor(state, rpc_api_pipe_state_destructor);

	/*
	 * Ensure we're not sending too much.
	 */
	if (prs_offset(data) > cli->max_xmit_frag) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));

	max_recv_frag = cli->max_recv_frag;

#ifdef DEVELOPER
	/* Force tiny, randomly sized fragments to exercise the
	   multi-fragment reassembly path in developer builds. */
	max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
#endif

	subreq = cli_api_pipe_send(state, ev, cli->transport,
				   (uint8_t *)prs_data_p(data),
				   prs_offset(data), max_recv_frag);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_api_pipe_trans_done;
	subreq->async.priv = result;
	return result;

 post_status:
	if (async_post_ntstatus(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
1308
/* The transport roundtrip finished: seed incoming_frag with the first
   chunk of the reply, then wait for a complete fragment. */
static void rpc_api_pipe_trans_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	uint8_t *rdata = NULL;
	uint32_t rdata_len = 0;
	char *rdata_copy;

	status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
		async_req_nterror(req, status);
		return;
	}

	/* An empty reply completes the request with an empty PDU. */
	if (rdata == NULL) {
		DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
			 rpccli_pipe_txt(debug_ctx(), state->cli)));
		async_req_done(req);
		return;
	}

	/*
	 * Give the memory received from cli_trans as dynamic to the current
	 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
	 * :-(
	 */
	rdata_copy = (char *)memdup(rdata, rdata_len);
	TALLOC_FREE(rdata);
	if (async_req_nomem(rdata_copy, req)) {
		return;
	}
	/* incoming_frag now owns rdata_copy (dynamic == true). */
	prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);

	/* Ensure we have enough data for a pdu. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1356
/* A complete fragment is available in incoming_frag: validate it, append
   its data portion to incoming_pdu, and loop until the last fragment. */
static void rpc_api_pipe_got_pdu(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	char *rdata = NULL;
	uint32_t rdata_len = 0;

	status = get_complete_frag_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("get_complete_frag failed: %s\n",
			  nt_errstr(status)));
		async_req_nterror(req, status);
		return;
	}

	/* Check packet type, auth trailer etc. and locate the NDR data
	   (rdata/rdata_len) within the fragment. */
	status = cli_pipe_validate_current_pdu(
		state->cli, &state->rhdr, &state->incoming_frag,
		state->expected_pkt_type, &rdata, &rdata_len,
		&state->incoming_pdu);

	DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
		  (unsigned)prs_data_size(&state->incoming_frag),
		  (unsigned)state->incoming_pdu_offset,
		  nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	if ((state->rhdr.flags & RPC_FLG_FIRST)
	    && (state->rhdr.pack_type[0] == 0)) {
		/*
		 * Set the data type correctly for big-endian data on the
		 * first packet.
		 */
		DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
			  "big-endian.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli)));
		prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
	}
	/*
	 * Check endianness on subsequent packets.
	 */
	if (state->incoming_frag.bigendian_data
	    != state->incoming_pdu.bigendian_data) {
		DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
			 "%s\n",
			 state->incoming_pdu.bigendian_data?"big":"little",
			 state->incoming_frag.bigendian_data?"big":"little"));
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	/* Now copy the data portion out of the pdu into rbuf. */
	if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
		async_req_nterror(req, NT_STATUS_NO_MEMORY);
		return;
	}

	memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
	       rdata, (size_t)rdata_len);
	state->incoming_pdu_offset += rdata_len;

	/* Drop the consumed fragment; any extra bytes already received
	   stay in incoming_frag for the next iteration. */
	status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
					    &state->incoming_frag);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	if (state->rhdr.flags & RPC_FLG_LAST) {
		DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  (unsigned)prs_data_size(&state->incoming_pdu)));
		async_req_done(req);
		return;
	}

	/* More fragments to come - wait for the next complete one. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1448
/* Hand the reassembled reply PDU to the caller. Ownership of the prs data
   moves to mem_ctx via structure copy. */
static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
				  prs_struct *reply_pdu)
{
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;

	if (async_req_is_nterror(req, &status)) {
		return status;
	}

	*reply_pdu = state->incoming_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->incoming_pdu from being freed in
	 * rpc_api_pipe_state_destructor()
	 */
	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
1471
1472 /*******************************************************************
1473  Creates krb5 auth bind.
1474  ********************************************************************/
1475
/* Build the krb5 auth trailer for a bind request: fetch a service ticket,
   GSS-API wrap it and append it to auth_data. Fails unless built with
   HAVE_KRB5. */
static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
						enum pipe_auth_level auth_level,
						RPC_HDR_AUTH *pauth_out,
						prs_struct *auth_data)
{
#ifdef HAVE_KRB5
	int ret;
	struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
	DATA_BLOB tkt = data_blob_null;
	DATA_BLOB tkt_wrapped = data_blob_null;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);

	DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
		a->service_principal ));

	/* Create the ticket for the service principal and return it in a gss-api wrapped blob. */

	ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
			&a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);

	if (ret) {
		/* Non-zero return is a krb5 error code. */
		DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
			"failed with %s\n",
			a->service_principal,
			error_message(ret) ));

		data_blob_free(&tkt);
		prs_mem_free(auth_data);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* wrap that up in a nice GSS-API wrapping */
	tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);

	data_blob_free(&tkt);

	/* Auth len in the rpc header doesn't include auth_header. */
	if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
		data_blob_free(&tkt_wrapped);
		prs_mem_free(auth_data);
		return NT_STATUS_NO_MEMORY;
	}

	DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
	dump_data(5, tkt_wrapped.data, tkt_wrapped.length);

	data_blob_free(&tkt_wrapped);
	return NT_STATUS_OK;
#else
	/* No kerberos support compiled in. */
	return NT_STATUS_INVALID_PARAMETER;
#endif
}
1530
1531 /*******************************************************************
1532  Creates SPNEGO NTLMSSP auth bind.
1533  ********************************************************************/
1534
1535 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1536                                                 enum pipe_auth_level auth_level,
1537                                                 RPC_HDR_AUTH *pauth_out,
1538                                                 prs_struct *auth_data)
1539 {
1540         NTSTATUS nt_status;
1541         DATA_BLOB null_blob = data_blob_null;
1542         DATA_BLOB request = data_blob_null;
1543         DATA_BLOB spnego_msg = data_blob_null;
1544
1545         /* We may change the pad length before marshalling. */
1546         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1547
1548         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1549         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1550                                         null_blob,
1551                                         &request);
1552
1553         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1554                 data_blob_free(&request);
1555                 prs_mem_free(auth_data);
1556                 return nt_status;
1557         }
1558
1559         /* Wrap this in SPNEGO. */
1560         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1561
1562         data_blob_free(&request);
1563
1564         /* Auth len in the rpc header doesn't include auth_header. */
1565         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1566                 data_blob_free(&spnego_msg);
1567                 prs_mem_free(auth_data);
1568                 return NT_STATUS_NO_MEMORY;
1569         }
1570
1571         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1572         dump_data(5, spnego_msg.data, spnego_msg.length);
1573
1574         data_blob_free(&spnego_msg);
1575         return NT_STATUS_OK;
1576 }
1577
1578 /*******************************************************************
1579  Creates NTLMSSP auth bind.
1580  ********************************************************************/
1581
1582 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1583                                                 enum pipe_auth_level auth_level,
1584                                                 RPC_HDR_AUTH *pauth_out,
1585                                                 prs_struct *auth_data)
1586 {
1587         NTSTATUS nt_status;
1588         DATA_BLOB null_blob = data_blob_null;
1589         DATA_BLOB request = data_blob_null;
1590
1591         /* We may change the pad length before marshalling. */
1592         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1593
1594         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1595         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1596                                         null_blob,
1597                                         &request);
1598
1599         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1600                 data_blob_free(&request);
1601                 prs_mem_free(auth_data);
1602                 return nt_status;
1603         }
1604
1605         /* Auth len in the rpc header doesn't include auth_header. */
1606         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1607                 data_blob_free(&request);
1608                 prs_mem_free(auth_data);
1609                 return NT_STATUS_NO_MEMORY;
1610         }
1611
1612         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1613         dump_data(5, request.data, request.length);
1614
1615         data_blob_free(&request);
1616         return NT_STATUS_OK;
1617 }
1618
1619 /*******************************************************************
1620  Creates schannel auth bind.
1621  ********************************************************************/
1622
1623 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1624                                                 enum pipe_auth_level auth_level,
1625                                                 RPC_HDR_AUTH *pauth_out,
1626                                                 prs_struct *auth_data)
1627 {
1628         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1629
1630         /* We may change the pad length before marshalling. */
1631         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1632
1633         /* Use lp_workgroup() if domain not specified */
1634
1635         if (!cli->auth->domain || !cli->auth->domain[0]) {
1636                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1637                 if (cli->auth->domain == NULL) {
1638                         return NT_STATUS_NO_MEMORY;
1639                 }
1640         }
1641
1642         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1643                                    global_myname());
1644
1645         /*
1646          * Now marshall the data into the auth parse_struct.
1647          */
1648
1649         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1650                                        &schannel_neg, auth_data, 0)) {
1651                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1652                 prs_mem_free(auth_data);
1653                 return NT_STATUS_NO_MEMORY;
1654         }
1655
1656         return NT_STATUS_OK;
1657 }
1658
1659 /*******************************************************************
1660  Creates the internals of a DCE/RPC bind request or alter context PDU.
1661  ********************************************************************/
1662
1663 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1664                                                 prs_struct *rpc_out, 
1665                                                 uint32 rpc_call_id,
1666                                                 const RPC_IFACE *abstract,
1667                                                 const RPC_IFACE *transfer,
1668                                                 RPC_HDR_AUTH *phdr_auth,
1669                                                 prs_struct *pauth_info)
1670 {
1671         RPC_HDR hdr;
1672         RPC_HDR_RB hdr_rb;
1673         RPC_CONTEXT rpc_ctx;
1674         uint16 auth_len = prs_offset(pauth_info);
1675         uint8 ss_padding_len = 0;
1676         uint16 frag_len = 0;
1677
1678         /* create the RPC context. */
1679         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1680
1681         /* create the bind request RPC_HDR_RB */
1682         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1683
1684         /* Start building the frag length. */
1685         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1686
1687         /* Do we need to pad ? */
1688         if (auth_len) {
1689                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1690                 if (data_len % 8) {
1691                         ss_padding_len = 8 - (data_len % 8);
1692                         phdr_auth->auth_pad_len = ss_padding_len;
1693                 }
1694                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1695         }
1696
1697         /* Create the request RPC_HDR */
1698         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1699
1700         /* Marshall the RPC header */
1701         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1702                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1703                 return NT_STATUS_NO_MEMORY;
1704         }
1705
1706         /* Marshall the bind request data */
1707         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1708                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1709                 return NT_STATUS_NO_MEMORY;
1710         }
1711
1712         /*
1713          * Grow the outgoing buffer to store any auth info.
1714          */
1715
1716         if(auth_len != 0) {
1717                 if (ss_padding_len) {
1718                         char pad[8];
1719                         memset(pad, '\0', 8);
1720                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1721                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1722                                 return NT_STATUS_NO_MEMORY;
1723                         }
1724                 }
1725
1726                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1727                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1728                         return NT_STATUS_NO_MEMORY;
1729                 }
1730
1731
1732                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1733                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1734                         return NT_STATUS_NO_MEMORY;
1735                 }
1736         }
1737
1738         return NT_STATUS_OK;
1739 }
1740
1741 /*******************************************************************
1742  Creates a DCE/RPC bind request.
1743  ********************************************************************/
1744
1745 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1746                                 prs_struct *rpc_out, 
1747                                 uint32 rpc_call_id,
1748                                 const RPC_IFACE *abstract,
1749                                 const RPC_IFACE *transfer,
1750                                 enum pipe_auth_type auth_type,
1751                                 enum pipe_auth_level auth_level)
1752 {
1753         RPC_HDR_AUTH hdr_auth;
1754         prs_struct auth_info;
1755         NTSTATUS ret = NT_STATUS_OK;
1756
1757         ZERO_STRUCT(hdr_auth);
1758         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1759                 return NT_STATUS_NO_MEMORY;
1760
1761         switch (auth_type) {
1762                 case PIPE_AUTH_TYPE_SCHANNEL:
1763                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1764                         if (!NT_STATUS_IS_OK(ret)) {
1765                                 prs_mem_free(&auth_info);
1766                                 return ret;
1767                         }
1768                         break;
1769
1770                 case PIPE_AUTH_TYPE_NTLMSSP:
1771                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1772                         if (!NT_STATUS_IS_OK(ret)) {
1773                                 prs_mem_free(&auth_info);
1774                                 return ret;
1775                         }
1776                         break;
1777
1778                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1779                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1780                         if (!NT_STATUS_IS_OK(ret)) {
1781                                 prs_mem_free(&auth_info);
1782                                 return ret;
1783                         }
1784                         break;
1785
1786                 case PIPE_AUTH_TYPE_KRB5:
1787                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1788                         if (!NT_STATUS_IS_OK(ret)) {
1789                                 prs_mem_free(&auth_info);
1790                                 return ret;
1791                         }
1792                         break;
1793
1794                 case PIPE_AUTH_TYPE_NONE:
1795                         break;
1796
1797                 default:
1798                         /* "Can't" happen. */
1799                         return NT_STATUS_INVALID_INFO_CLASS;
1800         }
1801
1802         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1803                                                 rpc_out, 
1804                                                 rpc_call_id,
1805                                                 abstract,
1806                                                 transfer,
1807                                                 &hdr_auth,
1808                                                 &auth_info);
1809
1810         prs_mem_free(&auth_info);
1811         return ret;
1812 }
1813
1814 /*******************************************************************
1815  Create and add the NTLMSSP sign/seal auth header and data.
1816  ********************************************************************/
1817
1818 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1819                                         RPC_HDR *phdr,
1820                                         uint32 ss_padding_len,
1821                                         prs_struct *outgoing_pdu)
1822 {
1823         RPC_HDR_AUTH auth_info;
1824         NTSTATUS status;
1825         DATA_BLOB auth_blob = data_blob_null;
1826         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1827
1828         if (!cli->auth->a_u.ntlmssp_state) {
1829                 return NT_STATUS_INVALID_PARAMETER;
1830         }
1831
1832         /* Init and marshall the auth header. */
1833         init_rpc_hdr_auth(&auth_info,
1834                         map_pipe_auth_type_to_rpc_auth_type(
1835                                 cli->auth->auth_type),
1836                         cli->auth->auth_level,
1837                         ss_padding_len,
1838                         1 /* context id. */);
1839
1840         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1841                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1842                 data_blob_free(&auth_blob);
1843                 return NT_STATUS_NO_MEMORY;
1844         }
1845
1846         switch (cli->auth->auth_level) {
1847                 case PIPE_AUTH_LEVEL_PRIVACY:
1848                         /* Data portion is encrypted. */
1849                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1850                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1851                                         data_and_pad_len,
1852                                         (unsigned char *)prs_data_p(outgoing_pdu),
1853                                         (size_t)prs_offset(outgoing_pdu),
1854                                         &auth_blob);
1855                         if (!NT_STATUS_IS_OK(status)) {
1856                                 data_blob_free(&auth_blob);
1857                                 return status;
1858                         }
1859                         break;
1860
1861                 case PIPE_AUTH_LEVEL_INTEGRITY:
1862                         /* Data is signed. */
1863                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1864                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1865                                         data_and_pad_len,
1866                                         (unsigned char *)prs_data_p(outgoing_pdu),
1867                                         (size_t)prs_offset(outgoing_pdu),
1868                                         &auth_blob);
1869                         if (!NT_STATUS_IS_OK(status)) {
1870                                 data_blob_free(&auth_blob);
1871                                 return status;
1872                         }
1873                         break;
1874
1875                 default:
1876                         /* Can't happen. */
1877                         smb_panic("bad auth level");
1878                         /* Notreached. */
1879                         return NT_STATUS_INVALID_PARAMETER;
1880         }
1881
1882         /* Finally marshall the blob. */
1883
1884         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1885                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1886                         (unsigned int)NTLMSSP_SIG_SIZE));
1887                 data_blob_free(&auth_blob);
1888                 return NT_STATUS_NO_MEMORY;
1889         }
1890
1891         data_blob_free(&auth_blob);
1892         return NT_STATUS_OK;
1893 }
1894
1895 /*******************************************************************
1896  Create and add the schannel sign/seal auth header and data.
1897  ********************************************************************/
1898
1899 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1900                                         RPC_HDR *phdr,
1901                                         uint32 ss_padding_len,
1902                                         prs_struct *outgoing_pdu)
1903 {
1904         RPC_HDR_AUTH auth_info;
1905         RPC_AUTH_SCHANNEL_CHK verf;
1906         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1907         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1908         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1909
1910         if (!sas) {
1911                 return NT_STATUS_INVALID_PARAMETER;
1912         }
1913
1914         /* Init and marshall the auth header. */
1915         init_rpc_hdr_auth(&auth_info,
1916                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1917                         cli->auth->auth_level,
1918                         ss_padding_len,
1919                         1 /* context id. */);
1920
1921         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1922                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1923                 return NT_STATUS_NO_MEMORY;
1924         }
1925
1926         switch (cli->auth->auth_level) {
1927                 case PIPE_AUTH_LEVEL_PRIVACY:
1928                 case PIPE_AUTH_LEVEL_INTEGRITY:
1929                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1930                                 sas->seq_num));
1931
1932                         schannel_encode(sas,
1933                                         cli->auth->auth_level,
1934                                         SENDER_IS_INITIATOR,
1935                                         &verf,
1936                                         data_p,
1937                                         data_and_pad_len);
1938
1939                         sas->seq_num++;
1940                         break;
1941
1942                 default:
1943                         /* Can't happen. */
1944                         smb_panic("bad auth level");
1945                         /* Notreached. */
1946                         return NT_STATUS_INVALID_PARAMETER;
1947         }
1948
1949         /* Finally marshall the blob. */
1950         smb_io_rpc_auth_schannel_chk("",
1951                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1952                         &verf,
1953                         outgoing_pdu,
1954                         0);
1955
1956         return NT_STATUS_OK;
1957 }
1958
1959 /*******************************************************************
1960  Calculate how much data we're going to send in this packet, also
1961  work out any sign/seal padding length.
1962  ********************************************************************/
1963
1964 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1965                                         uint32 data_left,
1966                                         uint16 *p_frag_len,
1967                                         uint16 *p_auth_len,
1968                                         uint32 *p_ss_padding)
1969 {
1970         uint32 data_space, data_len;
1971
1972 #ifdef DEVELOPER
1973         if ((data_left > 0) && (sys_random() % 2)) {
1974                 data_left = MAX(data_left/2, 1);
1975         }
1976 #endif
1977
1978         switch (cli->auth->auth_level) {
1979                 case PIPE_AUTH_LEVEL_NONE:
1980                 case PIPE_AUTH_LEVEL_CONNECT:
1981                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1982                         data_len = MIN(data_space, data_left);
1983                         *p_ss_padding = 0;
1984                         *p_auth_len = 0;
1985                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1986                         return data_len;
1987
1988                 case PIPE_AUTH_LEVEL_INTEGRITY:
1989                 case PIPE_AUTH_LEVEL_PRIVACY:
1990                         /* Treat the same for all authenticated rpc requests. */
1991                         switch(cli->auth->auth_type) {
1992                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1993                                 case PIPE_AUTH_TYPE_NTLMSSP:
1994                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1995                                         break;
1996                                 case PIPE_AUTH_TYPE_SCHANNEL:
1997                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1998                                         break;
1999                                 default:
2000                                         smb_panic("bad auth type");
2001                                         break;
2002                         }
2003
2004                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2005                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2006
2007                         data_len = MIN(data_space, data_left);
2008                         *p_ss_padding = 0;
2009                         if (data_len % 8) {
2010                                 *p_ss_padding = 8 - (data_len % 8);
2011                         }
2012                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2013                                         data_len + *p_ss_padding +              /* data plus padding. */
2014                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2015                         return data_len;
2016
2017                 default:
2018                         smb_panic("bad auth level");
2019                         /* Notreached. */
2020                         return 0;
2021         }
2022 }
2023
2024 /*******************************************************************
2025  External interface.
2026  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2027  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2028  and deals with signing/sealing details.
2029  ********************************************************************/
2030
/* State for an async, possibly multi-fragment RPC request. */
struct rpc_api_pipe_req_state {
	struct event_context *ev;	/* event context driving the subrequests */
	struct rpc_pipe_client *cli;	/* pipe we are talking to */
	uint8_t op_num;			/* RPC opnum being called */
	uint32_t call_id;		/* call id shared by all fragments */
	prs_struct *req_data;		/* full NDR-marshalled request (caller-owned) */
	uint32_t req_data_sent;		/* bytes of req_data already sent */
	prs_struct outgoing_frag;	/* scratch buffer for the current fragment */
	prs_struct reply_pdu;		/* assembled response, handed to the caller */
};
2041
2042 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2043 {
2044         prs_mem_free(&s->outgoing_frag);
2045         prs_mem_free(&s->reply_pdu);
2046         return 0;
2047 }
2048
/* Continuation handlers and helper for the multi-fragment request send.
 * NOTE(review): write path uses tevent_req, final recv still uses the old
 * async_req - mid-conversion, see the callers below. */
static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
static void rpc_api_pipe_req_done(struct async_req *subreq);
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
                                  bool *is_last_frag);
2053
/*
 * Kick off an async RPC request on an open pipe.
 *
 * req_data is the fully NDR-marshalled request; it is split into PDU
 * fragments of at most cli->max_xmit_frag bytes. Intermediate fragments
 * go out via rpc_write_send (tevent_req); the last fragment is handed to
 * rpc_api_pipe_send (async_req), which also collects the response.
 * req_data is caller-owned and must stay alive until completion.
 */
struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct rpc_pipe_client *cli,
					uint8_t op_num,
					prs_struct *req_data)
{
	struct async_req *result, *subreq;
	struct tevent_req *subreq2;
	struct rpc_api_pipe_req_state *state;
	NTSTATUS status;
	bool is_last_frag;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_api_pipe_req_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->op_num = op_num;
	state->req_data = req_data;
	state->req_data_sent = 0;
	state->call_id = get_rpc_call_id();

	/* The negotiated fragment size must at least hold the headers plus
	 * the largest possible auth footer. */
	if (cli->max_xmit_frag
	    < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
		/* Server is screwed up ! */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
		      state, MARSHALL)) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}

	/* From here on the destructor frees the prs buffers. */
	talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);

	/* Marshall the first fragment into state->outgoing_frag. */
	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	if (is_last_frag) {
		/* Single-fragment request: send it and wait for the reply
		 * in one step (old-style async_req callback wiring). */
		subreq = rpc_api_pipe_send(state, ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = rpc_api_pipe_req_done;
		subreq->async.priv = result;
	} else {
		/* More fragments follow: just write this one; the write
		 * completion callback prepares and sends the next. */
		subreq2 = rpc_write_send(
			state, ev, cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (subreq2 == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq2, rpc_api_pipe_req_write_done,
					result);
	}
	return result;

 post_status:
	/* Deliver the error through the event loop if possible. */
	if (async_post_ntstatus(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
2130
/*
 * Marshall the next request fragment into state->outgoing_frag.
 *
 * Computes how much of the remaining req_data fits in this fragment
 * (honouring the auth footer overhead), writes the RPC and request
 * headers, the data slice, the sign/seal padding and finally the auth
 * footer. Advances state->req_data_sent; sets *is_last_frag when the
 * RPC_FLG_LAST flag was set on this fragment.
 */
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	RPC_HDR hdr;
	RPC_HDR_REQ hdr_req;
	uint32_t data_sent_thistime;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	uint32_t ss_padding;
	uint32_t data_left;
	char pad[8] = { 0, };
	NTSTATUS status;

	data_left = prs_offset(state->req_data) - state->req_data_sent;

	data_sent_thistime = calculate_data_len_tosend(
		state->cli, data_left, &frag_len, &auth_len, &ss_padding);

	/* First fragment of the call? */
	if (state->req_data_sent == 0) {
		flags = RPC_FLG_FIRST;
	}

	/* Everything fits in this fragment -> it is also the last one. */
	if (data_sent_thistime == data_left) {
		flags |= RPC_FLG_LAST;
	}

	/* Rewind the scratch buffer; it is reused for every fragment. */
	if (!prs_set_offset(&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create and marshall the header and request header. */
	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
		     auth_len);

	if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the rpc request RPC_HDR_REQ */
	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
			 state->op_num);

	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
				&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy in the data, plus any ss padding. */
	if (!prs_append_some_prs_data(&state->outgoing_frag,
				      state->req_data, state->req_data_sent,
				      data_sent_thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy the sign/seal padding data. */
	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Generate any auth sign/seal and add the auth footer. */
	switch (state->cli->auth->auth_type) {
	case PIPE_AUTH_TYPE_NONE:
		status = NT_STATUS_OK;
		break;
	case PIPE_AUTH_TYPE_NTLMSSP:
	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
						 &state->outgoing_frag);
		break;
	case PIPE_AUTH_TYPE_SCHANNEL:
		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
						  &state->outgoing_frag);
		break;
	default:
		status = NT_STATUS_INVALID_PARAMETER;
		break;
	}

	state->req_data_sent += data_sent_thistime;
	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);

	return status;
}
2215
/*
 * Completion callback for a non-final fragment write.
 *
 * Marshalls the next fragment; if it is the last one, hands it to
 * rpc_api_pipe_send (async_req) which also reads the response, otherwise
 * queues another rpc_write_send (tevent_req) with this function as its
 * callback again - i.e. fragments are sent strictly one at a time.
 */
static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
{
	struct async_req *req = tevent_req_callback_data(
		subreq, struct async_req);
	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_req_state);
	struct async_req *subreq2;
	NTSTATUS status;
	bool is_last_frag;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	/* Build the next fragment in state->outgoing_frag. */
	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	if (is_last_frag) {
		/* Last fragment: send it and collect the response. */
		subreq2 = rpc_api_pipe_send(state, state->ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (async_req_nomem(subreq2, req)) {
			return;
		}
		subreq2->async.fn = rpc_api_pipe_req_done;
		subreq2->async.priv = req;
	} else {
		/* More to come: write this fragment and loop back here. */
		subreq = rpc_write_send(
			state, state->ev,
			state->cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (async_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
}
2261
2262 static void rpc_api_pipe_req_done(struct async_req *subreq)
2263 {
2264         struct async_req *req = talloc_get_type_abort(
2265                 subreq->async.priv, struct async_req);
2266         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2267                 req->private_data, struct rpc_api_pipe_req_state);
2268         NTSTATUS status;
2269
2270         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2271         TALLOC_FREE(subreq);
2272         if (!NT_STATUS_IS_OK(status)) {
2273                 async_req_nterror(req, status);
2274                 return;
2275         }
2276         async_req_done(req);
2277 }
2278
/*
 * Collect the result of an async rpc request.
 *
 * On success *reply_pdu takes over state->reply_pdu and is reparented
 * onto mem_ctx.  On failure an empty UNMARSHALL pdu is handed back,
 * because the rpccli_* caller routines expect an initialized pdu
 * unconditionally.
 */
NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
                               prs_struct *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (async_req_is_nterror(req, &status)) {
		/*
		 * We always have to initialize the reply pdu, even if there is
		 * none. The rpccli_* caller routines expect this.
		 */
		prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
		return status;
	}

	/* Hand the pdu over to the caller's memory context. */
	*reply_pdu = state->reply_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->reply_pdu from being freed in
	 * rpc_api_pipe_req_state_destructor()
	 */
	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
2306
2307 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2308                         uint8 op_num,
2309                         prs_struct *in_data,
2310                         prs_struct *out_data)
2311 {
2312         TALLOC_CTX *frame = talloc_stackframe();
2313         struct event_context *ev;
2314         struct async_req *req;
2315         NTSTATUS status = NT_STATUS_NO_MEMORY;
2316
2317         ev = event_context_init(frame);
2318         if (ev == NULL) {
2319                 goto fail;
2320         }
2321
2322         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2323         if (req == NULL) {
2324                 goto fail;
2325         }
2326
2327         while (req->state < ASYNC_REQ_DONE) {
2328                 event_loop_once(ev);
2329         }
2330
2331         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2332  fail:
2333         TALLOC_FREE(frame);
2334         return status;
2335 }
2336
#if 0
/* NOTE(review): dead code, compiled out via #if 0 — the legacy SMBtrans
 * handle-state call.  Consider deleting rather than keeping it here. */
/****************************************************************************
 Set the handle state.
****************************************************************************/

static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
                                   const char *pipe_name, uint16 device_state)
{
	bool state_set = False;
	char param[2];
	uint16 setup[2]; /* only need 2 uint16 setup parameters */
	char *rparam = NULL;
	char *rdata = NULL;
	uint32 rparam_len, rdata_len;

	if (pipe_name == NULL)
		return False;

	DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
		 cli->fnum, pipe_name, device_state));

	/* create parameters: device state */
	SSVAL(param, 0, device_state);

	/* create setup parameters. */
	setup[0] = 0x0001; 
	setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */

	/* send the data on \PIPE\ */
	if (cli_api_pipe(cli->cli, "\\PIPE\\",
	            setup, 2, 0,                /* setup, length, max */
	            param, 2, 0,                /* param, length, max */
	            NULL, 0, 1024,              /* data, length, max */
	            &rparam, &rparam_len,        /* return param, length */
	            &rdata, &rdata_len))         /* return data, length */
	{
		DEBUG(5, ("Set Handle state: return OK\n"));
		state_set = True;
	}

	SAFE_FREE(rparam);
	SAFE_FREE(rdata);

	return state_set;
}
#endif
2383
2384 /****************************************************************************
2385  Check the rpc bind acknowledge response.
2386 ****************************************************************************/
2387
2388 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2389 {
2390         if ( hdr_ba->addr.len == 0) {
2391                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2392         }
2393
2394         /* check the transfer syntax */
2395         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2396              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2397                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2398                 return False;
2399         }
2400
2401         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2402                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2403                           hdr_ba->res.num_results, hdr_ba->res.reason));
2404         }
2405
2406         DEBUG(5,("check_bind_response: accepted!\n"));
2407         return True;
2408 }
2409
2410 /*******************************************************************
2411  Creates a DCE/RPC bind authentication response.
2412  This is the packet that is sent back to the server once we
2413  have received a BIND-ACK, to finish the third leg of
2414  the authentication handshake.
2415  ********************************************************************/
2416
static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
				uint32 rpc_call_id,
				enum pipe_auth_type auth_type,
				enum pipe_auth_level auth_level,
				DATA_BLOB *pauth_blob,
				prs_struct *rpc_out)
{
	RPC_HDR hdr;
	RPC_HDR_AUTH hdr_auth;
	uint32 pad = 0;

	/* Create the request RPC_HDR.  frag_len covers the fixed header,
	 * the 4 explicit pad bytes, the auth header and the auth blob. */
	init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
		     RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
		     pauth_blob->length );

	/* Marshall it. */
	if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
		I'm puzzled about this - seems to violate the DCE RPC auth rules,
		about padding - shouldn't this pad to length 8 ? JRA.
	*/

	/* 4 bytes padding. */
	if (!prs_uint32("pad", rpc_out, 0, &pad)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the request RPC_HDR_AUTHA */
	init_rpc_hdr_auth(&hdr_auth,
			map_pipe_auth_type_to_rpc_auth_type(auth_type),
			auth_level, 0, 1);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Append the auth data to the outgoing buffer.
	 */

	if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	return NT_STATUS_OK;
}
2471
2472 /*******************************************************************
2473  Creates a DCE/RPC bind alter context authentication request which
2474  may contain a spnego auth blobl
2475  ********************************************************************/
2476
2477 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2478                                         const RPC_IFACE *abstract,
2479                                         const RPC_IFACE *transfer,
2480                                         enum pipe_auth_level auth_level,
2481                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2482                                         prs_struct *rpc_out)
2483 {
2484         RPC_HDR_AUTH hdr_auth;
2485         prs_struct auth_info;
2486         NTSTATUS ret = NT_STATUS_OK;
2487
2488         ZERO_STRUCT(hdr_auth);
2489         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2490                 return NT_STATUS_NO_MEMORY;
2491
2492         /* We may change the pad length before marshalling. */
2493         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2494
2495         if (pauth_blob->length) {
2496                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2497                         prs_mem_free(&auth_info);
2498                         return NT_STATUS_NO_MEMORY;
2499                 }
2500         }
2501
2502         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2503                                                 rpc_out, 
2504                                                 rpc_call_id,
2505                                                 abstract,
2506                                                 transfer,
2507                                                 &hdr_auth,
2508                                                 &auth_info);
2509         prs_mem_free(&auth_info);
2510         return ret;
2511 }
2512
2513 /****************************************************************************
2514  Do an rpc bind.
2515 ****************************************************************************/
2516
struct rpc_pipe_bind_state {
	struct event_context *ev;	/* event context driving the bind */
	struct rpc_pipe_client *cli;	/* pipe being bound */
	prs_struct rpc_out;		/* marshalled outgoing bind/auth PDU */
	uint32_t rpc_call_id;		/* call id reused across all bind legs */
};
2523
/* Release the marshalling buffer when the bind state is freed. */
static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
{
	prs_mem_free(&state->rpc_out);
	return 0;
}
2529
2530 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2531 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2532                                            struct rpc_pipe_bind_state *state,
2533                                            struct rpc_hdr_info *phdr,
2534                                            prs_struct *reply_pdu);
2535 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2536 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2537                                                     struct rpc_pipe_bind_state *state,
2538                                                     struct rpc_hdr_info *phdr,
2539                                                     prs_struct *reply_pdu);
2540 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2541
/*
 * Kick off an async DCE/RPC bind on "cli".  Takes ownership of "auth"
 * (it is talloc_move'd onto cli).  Returns NULL only on out-of-memory;
 * other early errors are reported through the async request itself.
 */
struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
				     struct event_context *ev,
				     struct rpc_pipe_client *cli,
				     struct cli_pipe_auth_data *auth)
{
	struct async_req *result, *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_pipe_bind_state)) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
		rpccli_pipe_txt(debug_ctx(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();

	prs_init_empty(&state->rpc_out, state, MARSHALL);
	talloc_set_destructor(state, rpc_pipe_bind_state_destructor);

	/* The pipe now owns the auth data. */
	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(cli, &state->rpc_out,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     cli->auth->auth_type,
				     cli->auth->auth_level);

	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   RPC_BINDACK);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_pipe_bind_step_one_done;
	subreq->async.priv = result;
	return result;

 post_status:
	/* Deliver the error through the event loop rather than directly. */
	if (async_post_ntstatus(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
2599
/*
 * The bind-ack arrived.  Validate it and, depending on the auth type,
 * either finish the bind or start the third/fourth handshake leg.
 * Every exit path must free reply_pdu.
 */
static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_ba_info hdr_ba;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  nt_errstr(status)));
		async_req_nterror(req, status);
		return;
	}

	/* Unmarshall the RPC header */
	if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
			  "RPC_HDR_BA.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
		DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Remember the server's negotiated fragment sizes. */
	state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
	state->cli->max_recv_frag = hdr_ba.bba.max_rsize;

	/*
	 * For authenticated binds we may need to do 3 or 4 leg binds.
	 */

	switch(state->cli->auth->auth_type) {

	case PIPE_AUTH_TYPE_NONE:
	case PIPE_AUTH_TYPE_SCHANNEL:
		/* Bind complete. */
		prs_mem_free(&reply_pdu);
		async_req_done(req);
		break;

	case PIPE_AUTH_TYPE_NTLMSSP:
		/* Need to send AUTH3 packet - no reply. */
		status = rpc_finish_auth3_bind_send(req, state, &hdr,
						    &reply_pdu);
		prs_mem_free(&reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_nterror(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		/* Need to send alter context request and reply. */
		status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
							     &reply_pdu);
		prs_mem_free(&reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_nterror(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_KRB5:
		/* Not implemented - fall through to the error below. */

	default:
		DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
			 (unsigned int)state->cli->auth->auth_type));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
	}
}
2690
2691 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2692                                            struct rpc_pipe_bind_state *state,
2693                                            struct rpc_hdr_info *phdr,
2694                                            prs_struct *reply_pdu)
2695 {
2696         DATA_BLOB server_response = data_blob_null;
2697         DATA_BLOB client_reply = data_blob_null;
2698         struct rpc_hdr_auth_info hdr_auth;
2699         struct tevent_req *subreq;
2700         NTSTATUS status;
2701
2702         if ((phdr->auth_len == 0)
2703             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2704                 return NT_STATUS_INVALID_PARAMETER;
2705         }
2706
2707         if (!prs_set_offset(
2708                     reply_pdu,
2709                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2710                 return NT_STATUS_INVALID_PARAMETER;
2711         }
2712
2713         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2714                 return NT_STATUS_INVALID_PARAMETER;
2715         }
2716
2717         /* TODO - check auth_type/auth_level match. */
2718
2719         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2720         prs_copy_data_out((char *)server_response.data, reply_pdu,
2721                           phdr->auth_len);
2722
2723         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2724                                 server_response, &client_reply);
2725
2726         if (!NT_STATUS_IS_OK(status)) {
2727                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2728                           "blob failed: %s.\n", nt_errstr(status)));
2729                 return status;
2730         }
2731
2732         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2733
2734         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2735                                        state->cli->auth->auth_type,
2736                                        state->cli->auth->auth_level,
2737                                        &client_reply, &state->rpc_out);
2738         data_blob_free(&client_reply);
2739
2740         if (!NT_STATUS_IS_OK(status)) {
2741                 return status;
2742         }
2743
2744         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2745                                 (uint8_t *)prs_data_p(&state->rpc_out),
2746                                 prs_offset(&state->rpc_out));
2747         if (subreq == NULL) {
2748                 return NT_STATUS_NO_MEMORY;
2749         }
2750         tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2751         return NT_STATUS_OK;
2752 }
2753
2754 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2755 {
2756         struct async_req *req = tevent_req_callback_data(
2757                 subreq, struct async_req);
2758         NTSTATUS status;
2759
2760         status = rpc_write_recv(subreq);
2761         TALLOC_FREE(subreq);
2762         if (!NT_STATUS_IS_OK(status)) {
2763                 async_req_nterror(req, status);
2764                 return;
2765         }
2766         async_req_done(req);
2767 }
2768
/*
 * Third leg of a SPNEGO/NTLMSSP bind: unwrap the server's SPNEGO
 * challenge from the bind-ack, run it through ntlmssp_update(), wrap
 * the reply in SPNEGO again and send it as an alter-context request.
 * The alter-context response is handled in rpc_bind_ntlmssp_api_done().
 */
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu)
{
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB server_ntlm_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	RPC_HDR_AUTH hdr_auth;
	struct async_req *subreq;
	NTSTATUS status;

	if ((phdr->auth_len == 0)
	    || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Process the returned NTLMSSP blob first. */
	if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	server_spnego_response = data_blob(NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_spnego_response.data,
			  reply_pdu, phdr->auth_len);

	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
	if (!spnego_parse_challenge(server_spnego_response,
				    &server_ntlm_response, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&server_ntlm_response);
		data_blob_free(&tmp_blob);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* We're finished with the server spnego response and the tmp_blob. */
	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
				server_ntlm_response, &client_reply);

	/* Finished with the server_ntlm response */
	data_blob_free(&server_ntlm_response);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
			  "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}

	/* SPNEGO wrap the client reply. */
	tmp_blob = spnego_gen_auth(client_reply);
	data_blob_free(&client_reply);
	client_reply = tmp_blob;
	tmp_blob = data_blob_null;

	/* Now prepare the alter context pdu. */
	prs_init_empty(&state->rpc_out, state, MARSHALL);

	status = create_rpc_alter_context(state->rpc_call_id,
					  &state->cli->abstract_syntax,
					  &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	subreq = rpc_api_pipe_send(state, state->ev, state->cli,
				   &state->rpc_out, RPC_ALTCONTRESP);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	subreq->async.fn = rpc_bind_ntlmssp_api_done;
	subreq->async.priv = req;
	return NT_STATUS_OK;
}
2861
/*
 * Fourth leg of a SPNEGO/NTLMSSP bind: the alter-context response
 * arrived.  Pull the SPNEGO auth blob out of its auth trailer and check
 * that it is a valid NTLMSSP auth response before declaring the bind
 * complete.
 */
static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_auth_info hdr_auth;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	/* Get the auth blob from the reply. */
	if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
			  "unmarshall RPC_HDR.\n"));
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Seek to the auth trailer at the end of the fragment. */
	if (!prs_set_offset(
		    &reply_pdu,
		    hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	server_spnego_response = data_blob(NULL, hdr.auth_len);
	prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
			  hdr.auth_len);

	/* Check we got a valid auth response. */
	if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
					OID_NTLMSSP, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&tmp_blob);
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
		 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
	async_req_done(req);
}
2922
/* Collect the result of an async rpc_pipe_bind_send(). */
NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
{
	return async_req_simple_recv_ntstatus(req);
}
2927
2928 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2929                        struct cli_pipe_auth_data *auth)
2930 {
2931         TALLOC_CTX *frame = talloc_stackframe();
2932         struct event_context *ev;
2933         struct async_req *req;
2934         NTSTATUS status = NT_STATUS_NO_MEMORY;
2935
2936         ev = event_context_init(frame);
2937         if (ev == NULL) {
2938                 goto fail;
2939         }
2940
2941         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2942         if (req == NULL) {
2943                 goto fail;
2944         }
2945
2946         while (req->state < ASYNC_REQ_DONE) {
2947                 event_loop_once(ev);
2948         }
2949
2950         status = rpc_pipe_bind_recv(req);
2951  fail:
2952         TALLOC_FREE(frame);
2953         return status;
2954 }
2955
2956 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2957                                 unsigned int timeout)
2958 {
2959         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2960
2961         if (cli == NULL) {
2962                 return 0;
2963         }
2964         return cli_set_timeout(cli, timeout);
2965 }
2966
2967 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2968 {
2969         struct cli_state *cli;
2970
2971         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2972             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2973                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2974                 return true;
2975         }
2976
2977         cli = rpc_pipe_np_smb_conn(rpc_cli);
2978         if (cli == NULL) {
2979                 return false;
2980         }
2981         E_md4hash(cli->password ? cli->password : "", nt_hash);
2982         return true;
2983 }
2984
2985 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2986                                struct cli_pipe_auth_data **presult)
2987 {
2988         struct cli_pipe_auth_data *result;
2989
2990         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2991         if (result == NULL) {
2992                 return NT_STATUS_NO_MEMORY;
2993         }
2994
2995         result->auth_type = PIPE_AUTH_TYPE_NONE;
2996         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2997
2998         result->user_name = talloc_strdup(result, "");
2999         result->domain = talloc_strdup(result, "");
3000         if ((result->user_name == NULL) || (result->domain == NULL)) {
3001                 TALLOC_FREE(result);
3002                 return NT_STATUS_NO_MEMORY;
3003         }
3004
3005         *presult = result;
3006         return NT_STATUS_OK;
3007 }
3008
3009 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3010 {
3011         ntlmssp_end(&auth->a_u.ntlmssp_state);
3012         return 0;
3013 }
3014
3015 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3016                                   enum pipe_auth_type auth_type,
3017                                   enum pipe_auth_level auth_level,
3018                                   const char *domain,
3019                                   const char *username,
3020                                   const char *password,
3021                                   struct cli_pipe_auth_data **presult)
3022 {
3023         struct cli_pipe_auth_data *result;
3024         NTSTATUS status;
3025
3026         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3027         if (result == NULL) {
3028                 return NT_STATUS_NO_MEMORY;
3029         }
3030
3031         result->auth_type = auth_type;
3032         result->auth_level = auth_level;
3033
3034         result->user_name = talloc_strdup(result, username);
3035         result->domain = talloc_strdup(result, domain);
3036         if ((result->user_name == NULL) || (result->domain == NULL)) {
3037                 status = NT_STATUS_NO_MEMORY;
3038                 goto fail;
3039         }
3040
3041         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3042         if (!NT_STATUS_IS_OK(status)) {
3043                 goto fail;
3044         }
3045
3046         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3047
3048         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3049         if (!NT_STATUS_IS_OK(status)) {
3050                 goto fail;
3051         }
3052
3053         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3054         if (!NT_STATUS_IS_OK(status)) {
3055                 goto fail;
3056         }
3057
3058         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3059         if (!NT_STATUS_IS_OK(status)) {
3060                 goto fail;
3061         }
3062
3063         /*
3064          * Turn off sign+seal to allow selected auth level to turn it back on.
3065          */
3066         result->a_u.ntlmssp_state->neg_flags &=
3067                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3068
3069         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3070                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3071         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3072                 result->a_u.ntlmssp_state->neg_flags
3073                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3074         }
3075
3076         *presult = result;
3077         return NT_STATUS_OK;
3078
3079  fail:
3080         TALLOC_FREE(result);
3081         return status;
3082 }
3083
3084 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3085                                    enum pipe_auth_level auth_level,
3086                                    const uint8_t sess_key[16],
3087                                    struct cli_pipe_auth_data **presult)
3088 {
3089         struct cli_pipe_auth_data *result;
3090
3091         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3092         if (result == NULL) {
3093                 return NT_STATUS_NO_MEMORY;
3094         }
3095
3096         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3097         result->auth_level = auth_level;
3098
3099         result->user_name = talloc_strdup(result, "");
3100         result->domain = talloc_strdup(result, domain);
3101         if ((result->user_name == NULL) || (result->domain == NULL)) {
3102                 goto fail;
3103         }
3104
3105         result->a_u.schannel_auth = talloc(result,
3106                                            struct schannel_auth_struct);
3107         if (result->a_u.schannel_auth == NULL) {
3108                 goto fail;
3109         }
3110
3111         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3112                sizeof(result->a_u.schannel_auth->sess_key));
3113         result->a_u.schannel_auth->seq_num = 0;
3114
3115         *presult = result;
3116         return NT_STATUS_OK;
3117
3118  fail:
3119         TALLOC_FREE(result);
3120         return NT_STATUS_NO_MEMORY;
3121 }
3122
#ifdef HAVE_KRB5
/* Talloc destructor: release the Kerberos session key blob. */
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *krb)
{
	data_blob_free(&krb->session_key);
	return 0;
}
#endif
3130
3131 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3132                                    enum pipe_auth_level auth_level,
3133                                    const char *service_princ,
3134                                    const char *username,
3135                                    const char *password,
3136                                    struct cli_pipe_auth_data **presult)
3137 {
3138 #ifdef HAVE_KRB5
3139         struct cli_pipe_auth_data *result;
3140
3141         if ((username != NULL) && (password != NULL)) {
3142                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3143                 if (ret != 0) {
3144                         return NT_STATUS_ACCESS_DENIED;
3145                 }
3146         }
3147
3148         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3149         if (result == NULL) {
3150                 return NT_STATUS_NO_MEMORY;
3151         }
3152
3153         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3154         result->auth_level = auth_level;
3155
3156         /*
3157          * Username / domain need fixing!
3158          */