Convert rpc_read to tevent_req
[ira/wip.git] / source3 / rpc_client / cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
54 static const struct pipe_id_info {
55         /* the names appear not to matter: the syntaxes _do_ matter */
56
57         const char *client_pipe;
58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
60 {
61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
68         { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
77         { NULL, NULL }
78 };
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
85 {
86         char *guid_str;
87         const char *result;
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
102         if (guid_str == NULL) {
103                 return NULL;
104         }
105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106                                  (int)interface->if_version);
107         TALLOC_FREE(guid_str);
108
109         if (result == NULL) {
110                 return "PIPE";
111         }
112         return result;
113 }
114
115 /********************************************************************
116  Map internal value to wire value.
117  ********************************************************************/
118
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
120 {
121         switch (auth_type) {
122
123         case PIPE_AUTH_TYPE_NONE:
124                 return RPC_ANONYMOUS_AUTH_TYPE;
125
126         case PIPE_AUTH_TYPE_NTLMSSP:
127                 return RPC_NTLMSSP_AUTH_TYPE;
128
129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131                 return RPC_SPNEGO_AUTH_TYPE;
132
133         case PIPE_AUTH_TYPE_SCHANNEL:
134                 return RPC_SCHANNEL_AUTH_TYPE;
135
136         case PIPE_AUTH_TYPE_KRB5:
137                 return RPC_KRB5_AUTH_TYPE;
138
139         default:
140                 DEBUG(0,("map_pipe_auth_type_to_rpc_auth_type: unknown pipe "
141                         "auth type %u\n",
142                         (unsigned int)auth_type ));
143                 break;
144         }
145         return -1;
146 }
147
148 /********************************************************************
149  Pipe description for a DEBUG
150  ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152                                    struct rpc_pipe_client *cli)
153 {
154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155         if (result == NULL) {
156                 return "pipe";
157         }
158         return result;
159 }
160
161 /********************************************************************
162  Rpc pipe call id.
163  ********************************************************************/
164
165 static uint32 get_rpc_call_id(void)
166 {
167         static uint32 call_id = 0;
168         return ++call_id;
169 }
170
171 /*
172  * Realloc pdu to have at least "size" bytes
173  */
174
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
176 {
177         size_t extra_size;
178
179         if (prs_data_size(pdu) >= size) {
180                 return true;
181         }
182
183         extra_size = size - prs_data_size(pdu);
184
185         if (!prs_force_grow(pdu, extra_size)) {
186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187                           "%d bytes.\n", (int)extra_size));
188                 return false;
189         }
190
191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192                   (int)extra_size, prs_data_size(pdu)));
193         return true;
194 }
195
196
197 /*******************************************************************
198  Use SMBreadX to get the rest of one fragment's worth of rpc data.
199  Reads the whole size or gives an error message.
200  ********************************************************************/
201
202 struct rpc_read_state {
203         struct event_context *ev;
204         struct rpc_cli_transport *transport;
205         uint8_t *data;
206         size_t size;
207         size_t num_read;
208 };
209
210 static void rpc_read_done(struct async_req *subreq);
211
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213                                         struct event_context *ev,
214                                         struct rpc_cli_transport *transport,
215                                         uint8_t *data, size_t size)
216 {
217         struct tevent_req *req;
218         struct async_req *subreq;
219         struct rpc_read_state *state;
220
221         req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
222         if (req == NULL) {
223                 return NULL;
224         }
225         state->ev = ev;
226         state->transport = transport;
227         state->data = data;
228         state->size = size;
229         state->num_read = 0;
230
231         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
232
233         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
234                                       transport->priv);
235         if (subreq == NULL) {
236                 goto fail;
237         }
238         subreq->async.fn = rpc_read_done;
239         subreq->async.priv = req;
240         return req;
241
242  fail:
243         TALLOC_FREE(req);
244         return NULL;
245 }
246
247 static void rpc_read_done(struct async_req *subreq)
248 {
249         struct tevent_req *req = talloc_get_type_abort(
250                 subreq->async.priv, struct tevent_req);
251         struct rpc_read_state *state = tevent_req_data(
252                 req, struct rpc_read_state);
253         NTSTATUS status;
254         ssize_t received;
255
256         status = state->transport->read_recv(subreq, &received);
257         TALLOC_FREE(subreq);
258         if (!NT_STATUS_IS_OK(status)) {
259                 tevent_req_nterror(req, status);
260                 return;
261         }
262
263         state->num_read += received;
264         if (state->num_read == state->size) {
265                 tevent_req_done(req);
266                 return;
267         }
268
269         subreq = state->transport->read_send(state, state->ev,
270                                              state->data + state->num_read,
271                                              state->size - state->num_read,
272                                              state->transport->priv);
273         if (tevent_req_nomem(subreq, req)) {
274                 return;
275         }
276         subreq->async.fn = rpc_read_done;
277         subreq->async.priv = req;
278 }
279
280 static NTSTATUS rpc_read_recv(struct tevent_req *req)
281 {
282         return tevent_req_simple_recv_ntstatus(req);
283 }
284
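/*
 * Editorial sketch, not part of the original file: one way the freshly
 * converted tevent_req-based rpc_read_send()/rpc_read_recv() pair could be
 * driven from blocking code. The wrapper name "rpc_read_sync" and the use
 * of event_context_init()/tevent_req_poll() here are illustrative
 * assumptions, not code that exists in this tree.
 */
#if 0
static NTSTATUS rpc_read_sync(struct rpc_cli_transport *transport,
                              uint8_t *data, size_t size)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        ev = event_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }
        req = rpc_read_send(frame, ev, transport, data, size);
        if (req == NULL) {
                goto fail;
        }
        /* Run the event loop until the request has completed. */
        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto fail;
        }
        status = rpc_read_recv(req);
 fail:
        TALLOC_FREE(frame);
        return status;
}
#endif
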
285 struct rpc_write_state {
286         struct event_context *ev;
287         struct rpc_cli_transport *transport;
288         const uint8_t *data;
289         size_t size;
290         size_t num_written;
291 };
292
293 static void rpc_write_done(struct async_req *subreq);
294
295 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
296                                         struct event_context *ev,
297                                         struct rpc_cli_transport *transport,
298                                         const uint8_t *data, size_t size)
299 {
300         struct async_req *result, *subreq;
301         struct rpc_write_state *state;
302
303         if (!async_req_setup(mem_ctx, &result, &state,
304                              struct rpc_write_state)) {
305                 return NULL;
306         }
307         state->ev = ev;
308         state->transport = transport;
309         state->data = data;
310         state->size = size;
311         state->num_written = 0;
312
313         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
314
315         subreq = transport->write_send(state, ev, data, size, transport->priv);
316         if (subreq == NULL) {
317                 goto fail;
318         }
319         subreq->async.fn = rpc_write_done;
320         subreq->async.priv = result;
321         return result;
322  fail:
323         TALLOC_FREE(result);
324         return NULL;
325 }
326
327 static void rpc_write_done(struct async_req *subreq)
328 {
329         struct async_req *req = talloc_get_type_abort(
330                 subreq->async.priv, struct async_req);
331         struct rpc_write_state *state = talloc_get_type_abort(
332                 req->private_data, struct rpc_write_state);
333         NTSTATUS status;
334         ssize_t written;
335
336         status = state->transport->write_recv(subreq, &written);
337         TALLOC_FREE(subreq);
338         if (!NT_STATUS_IS_OK(status)) {
339                 async_req_nterror(req, status);
340                 return;
341         }
342
343         state->num_written += written;
344
345         if (state->num_written == state->size) {
346                 async_req_done(req);
347                 return;
348         }
349
350         subreq = state->transport->write_send(state, state->ev,
351                                               state->data + state->num_written,
352                                               state->size - state->num_written,
353                                               state->transport->priv);
354         if (async_req_nomem(subreq, req)) {
355                 return;
356         }
357         subreq->async.fn = rpc_write_done;
358         subreq->async.priv = req;
359 }
360
361 static NTSTATUS rpc_write_recv(struct async_req *req)
362 {
363         return async_req_simple_recv_ntstatus(req);
364 }
365
366
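/*
 * Editorial note: rpc_read above has been converted to the tevent_req API,
 * while rpc_write still uses the older async_req helpers. Comparing the two
 * shows the mapping applied in the conversion:
 *
 *   async_req_setup()                -> tevent_req_create()
 *   async_req_nterror()              -> tevent_req_nterror()
 *   async_req_nomem()                -> tevent_req_nomem()
 *   async_req_done()                 -> tevent_req_done()
 *   async_req_simple_recv_ntstatus() -> tevent_req_simple_recv_ntstatus()
 *
 * The subrequests returned by the transport are still async_req based, so
 * their callbacks are still wired up via subreq->async.fn and
 * subreq->async.priv.
 */
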
367 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
368                                  struct rpc_hdr_info *prhdr,
369                                  prs_struct *pdu)
370 {
371         /*
372          * This next call sets the endian bit correctly in current_pdu. We
373          * will propagate this to rbuf later.
374          */
375
376         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
377                 DEBUG(0, ("parse_rpc_header: Failed to unmarshall RPC_HDR.\n"));
378                 return NT_STATUS_BUFFER_TOO_SMALL;
379         }
380
381         if (prhdr->frag_len > cli->max_recv_frag) {
382                 DEBUG(0, ("parse_rpc_header: Server sent fraglen %d,"
383                           " we only allow %d\n", (int)prhdr->frag_len,
384                           (int)cli->max_recv_frag));
385                 return NT_STATUS_BUFFER_TOO_SMALL;
386         }
387
388         return NT_STATUS_OK;
389 }
390
391 /****************************************************************************
392  Try and get a PDU's worth of data from current_pdu. If not, then read more
393  from the wire.
394  ****************************************************************************/
395
396 struct get_complete_frag_state {
397         struct event_context *ev;
398         struct rpc_pipe_client *cli;
399         struct rpc_hdr_info *prhdr;
400         prs_struct *pdu;
401 };
402
403 static void get_complete_frag_got_header(struct tevent_req *subreq);
404 static void get_complete_frag_got_rest(struct tevent_req *subreq);
405
406 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
407                                                struct event_context *ev,
408                                                struct rpc_pipe_client *cli,
409                                                struct rpc_hdr_info *prhdr,
410                                                prs_struct *pdu)
411 {
412         struct async_req *result;
413         struct tevent_req *subreq;
414         struct get_complete_frag_state *state;
415         uint32_t pdu_len;
416         NTSTATUS status;
417
418         if (!async_req_setup(mem_ctx, &result, &state,
419                              struct get_complete_frag_state)) {
420                 return NULL;
421         }
422         state->ev = ev;
423         state->cli = cli;
424         state->prhdr = prhdr;
425         state->pdu = pdu;
426
427         pdu_len = prs_data_size(pdu);
428         if (pdu_len < RPC_HEADER_LEN) {
429                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
430                         status = NT_STATUS_NO_MEMORY;
431                         goto post_status;
432                 }
433                 subreq = rpc_read_send(
434                         state, state->ev,
435                         state->cli->transport,
436                         (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
437                         RPC_HEADER_LEN - pdu_len);
438                 if (subreq == NULL) {
439                         status = NT_STATUS_NO_MEMORY;
440                         goto post_status;
441                 }
442                 tevent_req_set_callback(subreq, get_complete_frag_got_header,
443                                         result);
444                 return result;
445         }
446
447         status = parse_rpc_header(cli, prhdr, pdu);
448         if (!NT_STATUS_IS_OK(status)) {
449                 goto post_status;
450         }
451
452         /*
453          * Ensure we have frag_len bytes of data.
454          */
455         if (pdu_len < prhdr->frag_len) {
456                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
457                         status = NT_STATUS_NO_MEMORY;
458                         goto post_status;
459                 }
460                 subreq = rpc_read_send(state, state->ev,
461                                        state->cli->transport,
462                                        (uint8_t *)(prs_data_p(pdu) + pdu_len),
463                                        prhdr->frag_len - pdu_len);
464                 if (subreq == NULL) {
465                         status = NT_STATUS_NO_MEMORY;
466                         goto post_status;
467                 }
468                 tevent_req_set_callback(subreq, get_complete_frag_got_rest,
469                                         result);
470                 return result;
471         }
472
473         status = NT_STATUS_OK;
474  post_status:
475         if (async_post_ntstatus(result, ev, status)) {
476                 return result;
477         }
478         TALLOC_FREE(result);
479         return NULL;
480 }
481
482 static void get_complete_frag_got_header(struct tevent_req *subreq)
483 {
484         struct async_req *req = tevent_req_callback_data(
485                 subreq, struct async_req);
486         struct get_complete_frag_state *state = talloc_get_type_abort(
487                 req->private_data, struct get_complete_frag_state);
488         NTSTATUS status;
489
490         status = rpc_read_recv(subreq);
491         TALLOC_FREE(subreq);
492         if (!NT_STATUS_IS_OK(status)) {
493                 async_req_nterror(req, status);
494                 return;
495         }
496
497         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
498         if (!NT_STATUS_IS_OK(status)) {
499                 async_req_nterror(req, status);
500                 return;
501         }
502
503         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
504                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
505                 return;
506         }
507
508         /*
509          * We're here in this piece of code because we've read exactly
510          * RPC_HEADER_LEN bytes into state->pdu.
511          */
512
513         subreq = rpc_read_send(
514                 state, state->ev, state->cli->transport,
515                 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
516                 state->prhdr->frag_len - RPC_HEADER_LEN);
517         if (async_req_nomem(subreq, req)) {
518                 return;
519         }
520         tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
521 }
522
523 static void get_complete_frag_got_rest(struct tevent_req *subreq)
524 {
525         struct async_req *req = tevent_req_callback_data(
526                 subreq, struct async_req);
527         NTSTATUS status;
528
529         status = rpc_read_recv(subreq);
530         TALLOC_FREE(subreq);
531         if (!NT_STATUS_IS_OK(status)) {
532                 async_req_nterror(req, status);
533                 return;
534         }
535         async_req_done(req);
536 }
537
538 static NTSTATUS get_complete_frag_recv(struct async_req *req)
539 {
540         return async_req_simple_recv_ntstatus(req);
541 }
542
543 /****************************************************************************
544  NTLMSSP specific sign/seal.
545  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
546  In fact I should probably abstract these into identical pieces of code... JRA.
547  ****************************************************************************/
548
549 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
550                                 prs_struct *current_pdu,
551                                 uint8 *p_ss_padding_len)
552 {
553         RPC_HDR_AUTH auth_info;
554         uint32 save_offset = prs_offset(current_pdu);
555         uint32 auth_len = prhdr->auth_len;
556         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
557         unsigned char *data = NULL;
558         size_t data_len;
559         unsigned char *full_packet_data = NULL;
560         size_t full_packet_data_len;
561         DATA_BLOB auth_blob;
562         NTSTATUS status;
563
564         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
565             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
566                 return NT_STATUS_OK;
567         }
568
569         if (!ntlmssp_state) {
570                 return NT_STATUS_INVALID_PARAMETER;
571         }
572
573         /* Ensure there's enough data for an authenticated response. */
574         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
575                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
576                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
577                         (unsigned int)auth_len ));
578                 return NT_STATUS_BUFFER_TOO_SMALL;
579         }
580
581         /*
582          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
583          * after the RPC header.
584          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
585          * functions as NTLMv2 checks the rpc headers also.
586          */
587
588         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
589         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
590
591         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
592         full_packet_data_len = prhdr->frag_len - auth_len;
593
594         /* Pull the auth header and the following data into a blob. */
595         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
596                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
597                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
598                 return NT_STATUS_BUFFER_TOO_SMALL;
599         }
600
601         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
602                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
603                 return NT_STATUS_BUFFER_TOO_SMALL;
604         }
605
606         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
607         auth_blob.length = auth_len;
608
609         switch (cli->auth->auth_level) {
610                 case PIPE_AUTH_LEVEL_PRIVACY:
611                         /* Data is encrypted. */
612                         status = ntlmssp_unseal_packet(ntlmssp_state,
613                                                         data, data_len,
614                                                         full_packet_data,
615                                                         full_packet_data_len,
616                                                         &auth_blob);
617                         if (!NT_STATUS_IS_OK(status)) {
618                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
619                                         "packet from %s. Error was %s.\n",
620                                         rpccli_pipe_txt(debug_ctx(), cli),
621                                         nt_errstr(status) ));
622                                 return status;
623                         }
624                         break;
625                 case PIPE_AUTH_LEVEL_INTEGRITY:
626                         /* Data is signed. */
627                         status = ntlmssp_check_packet(ntlmssp_state,
628                                                         data, data_len,
629                                                         full_packet_data,
630                                                         full_packet_data_len,
631                                                         &auth_blob);
632                         if (!NT_STATUS_IS_OK(status)) {
633                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
634                                         "packet from %s. Error was %s.\n",
635                                         rpccli_pipe_txt(debug_ctx(), cli),
636                                         nt_errstr(status) ));
637                                 return status;
638                         }
639                         break;
640                 default:
641                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
642                                   "auth level %d\n", cli->auth->auth_level));
643                         return NT_STATUS_INVALID_INFO_CLASS;
644         }
645
646         /*
647          * Return the current pointer to the data offset.
648          */
649
650         if(!prs_set_offset(current_pdu, save_offset)) {
651                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
652                         (unsigned int)save_offset ));
653                 return NT_STATUS_BUFFER_TOO_SMALL;
654         }
655
656         /*
657          * Remember the padding length. We must remove it from the real data
658          * stream once the sign/seal is done.
659          */
660
661         *p_ss_padding_len = auth_info.auth_pad_len;
662
663         return NT_STATUS_OK;
664 }
665
666 /****************************************************************************
667  schannel specific sign/seal.
668  ****************************************************************************/
669
670 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
671                                 prs_struct *current_pdu,
672                                 uint8 *p_ss_padding_len)
673 {
674         RPC_HDR_AUTH auth_info;
675         RPC_AUTH_SCHANNEL_CHK schannel_chk;
676         uint32 auth_len = prhdr->auth_len;
677         uint32 save_offset = prs_offset(current_pdu);
678         struct schannel_auth_struct *schannel_auth =
679                 cli->auth->a_u.schannel_auth;
680         uint32 data_len;
681
682         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
683             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
684                 return NT_STATUS_OK;
685         }
686
687         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
688                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
689                 return NT_STATUS_INVALID_PARAMETER;
690         }
691
692         if (!schannel_auth) {
693                 return NT_STATUS_INVALID_PARAMETER;
694         }
695
696         /* Ensure there's enough data for an authenticated response. */
697         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
698                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
699                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
700                         (unsigned int)auth_len ));
701                 return NT_STATUS_INVALID_PARAMETER;
702         }
703
704         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
705
706         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
707                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
708                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
709                 return NT_STATUS_BUFFER_TOO_SMALL;
710         }
711
712         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
713                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
714                 return NT_STATUS_BUFFER_TOO_SMALL;
715         }
716
717         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
718                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
719                         auth_info.auth_type));
720                 return NT_STATUS_BUFFER_TOO_SMALL;
721         }
722
723         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
724                                 &schannel_chk, current_pdu, 0)) {
725                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
726                 return NT_STATUS_BUFFER_TOO_SMALL;
727         }
728
729         if (!schannel_decode(schannel_auth,
730                         cli->auth->auth_level,
731                         SENDER_IS_ACCEPTOR,
732                         &schannel_chk,
733                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
734                         data_len)) {
735                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
736                                 "Connection to %s.\n",
737                                 rpccli_pipe_txt(debug_ctx(), cli)));
738                 return NT_STATUS_INVALID_PARAMETER;
739         }
740
741         /* The sequence number gets incremented on both send and receive. */
742         schannel_auth->seq_num++;
743
744         /*
745          * Return the current pointer to the data offset.
746          */
747
748         if(!prs_set_offset(current_pdu, save_offset)) {
749                 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
750                         (unsigned int)save_offset ));
751                 return NT_STATUS_BUFFER_TOO_SMALL;
752         }
753
754         /*
755          * Remember the padding length. We must remove it from the real data
756          * stream once the sign/seal is done.
757          */
758
759         *p_ss_padding_len = auth_info.auth_pad_len;
760
761         return NT_STATUS_OK;
762 }
763
764 /****************************************************************************
765  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
766  ****************************************************************************/
767
768 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
769                                 prs_struct *current_pdu,
770                                 uint8 *p_ss_padding_len)
771 {
772         NTSTATUS ret = NT_STATUS_OK;
773
774         /* Paranoia checks for auth_len. */
775         if (prhdr->auth_len) {
776                 if (prhdr->auth_len > prhdr->frag_len) {
777                         return NT_STATUS_INVALID_PARAMETER;
778                 }
779
780                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
781                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
782                         /* Integer wrap attempt. */
783                         return NT_STATUS_INVALID_PARAMETER;
784                 }
785         }
786
787         /*
788          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
789          */
790
791         switch(cli->auth->auth_type) {
792                 case PIPE_AUTH_TYPE_NONE:
793                         if (prhdr->auth_len) {
794                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
795                                           "Connection to %s - got non-zero "
796                                           "auth len %u.\n",
797                                         rpccli_pipe_txt(debug_ctx(), cli),
798                                         (unsigned int)prhdr->auth_len ));
799                                 return NT_STATUS_INVALID_PARAMETER;
800                         }
801                         break;
802
803                 case PIPE_AUTH_TYPE_NTLMSSP:
804                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
805                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
806                         if (!NT_STATUS_IS_OK(ret)) {
807                                 return ret;
808                         }
809                         break;
810
811                 case PIPE_AUTH_TYPE_SCHANNEL:
812                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
813                         if (!NT_STATUS_IS_OK(ret)) {
814                                 return ret;
815                         }
816                         break;
817
818                 case PIPE_AUTH_TYPE_KRB5:
819                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
820                 default:
821                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
822                                   "to %s - unknown internal auth type %u.\n",
823                                   rpccli_pipe_txt(debug_ctx(), cli),
824                                   cli->auth->auth_type ));
825                         return NT_STATUS_INVALID_INFO_CLASS;
826         }
827
828         return NT_STATUS_OK;
829 }
830
831 /****************************************************************************
832  Do basic authentication checks on an incoming pdu.
833  ****************************************************************************/
834
835 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
836                         prs_struct *current_pdu,
837                         uint8 expected_pkt_type,
838                         char **ppdata,
839                         uint32 *pdata_len,
840                         prs_struct *return_data)
841 {
842
843         NTSTATUS ret = NT_STATUS_OK;
844         uint32 current_pdu_len = prs_data_size(current_pdu);
845
846         if (current_pdu_len != prhdr->frag_len) {
847                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
848                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
849                 return NT_STATUS_INVALID_PARAMETER;
850         }
851
852         /*
853          * Point the return values at the real data including the RPC
854          * header. Just in case the caller wants it.
855          */
856         *ppdata = prs_data_p(current_pdu);
857         *pdata_len = current_pdu_len;
858
859         /* Ensure we have the correct type. */
860         switch (prhdr->pkt_type) {
861                 case RPC_ALTCONTRESP:
862                 case RPC_BINDACK:
863
864                         /* Alter context and bind ack share the same packet definitions. */
865                         break;
866
867
868                 case RPC_RESPONSE:
869                 {
870                         RPC_HDR_RESP rhdr_resp;
871                         uint8 ss_padding_len = 0;
872
873                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
874                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
875                                 return NT_STATUS_BUFFER_TOO_SMALL;
876                         }
877
878                         /* Here's where we deal with incoming sign/seal. */
879                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
880                                         current_pdu, &ss_padding_len);
881                         if (!NT_STATUS_IS_OK(ret)) {
882                                 return ret;
883                         }
884
885                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
886                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
887
888                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
889                                 return NT_STATUS_BUFFER_TOO_SMALL;
890                         }
891
892                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
893
894                         /* Remember to remove the auth footer. */
895                         if (prhdr->auth_len) {
896                                 /* We've already done integer wrap tests on auth_len in
897                                         cli_pipe_validate_rpc_response(). */
898                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
899                                         return NT_STATUS_BUFFER_TOO_SMALL;
900                                 }
901                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
902                         }
903
904                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
905                                 current_pdu_len, *pdata_len, ss_padding_len ));
906
907                         /*
908                          * If this is the first reply, and the allocation hint is reasonable, try to
909                          * set up the return_data parse_struct to the correct size.
910                          */
911
912                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
913                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
914                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
915                                                 "too large to allocate\n",
916                                                 (unsigned int)rhdr_resp.alloc_hint ));
917                                         return NT_STATUS_NO_MEMORY;
918                                 }
919                         }
920
921                         break;
922                 }
923
924                 case RPC_BINDNACK:
925                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
926                                   "received from %s!\n",
927                                   rpccli_pipe_txt(debug_ctx(), cli)));
928                         /* Use this for now... */
929                         return NT_STATUS_NETWORK_ACCESS_DENIED;
930
931                 case RPC_FAULT:
932                 {
933                         RPC_HDR_RESP rhdr_resp;
934                         RPC_HDR_FAULT fault_resp;
935
936                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
937                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
938                                 return NT_STATUS_BUFFER_TOO_SMALL;
939                         }
940
941                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
942                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
943                                 return NT_STATUS_BUFFER_TOO_SMALL;
944                         }
945
946                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
947                                   "code %s received from %s!\n",
948                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
949                                 rpccli_pipe_txt(debug_ctx(), cli)));
950                         if (NT_STATUS_IS_OK(fault_resp.status)) {
951                                 return NT_STATUS_UNSUCCESSFUL;
952                         } else {
953                                 return fault_resp.status;
954                         }
955                 }
956
957                 default:
958                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
959                                 "from %s!\n",
960                                 (unsigned int)prhdr->pkt_type,
961                                 rpccli_pipe_txt(debug_ctx(), cli)));
962                         return NT_STATUS_INVALID_INFO_CLASS;
963         }
964
965         if (prhdr->pkt_type != expected_pkt_type) {
966                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
967                           "got an unexpected RPC packet type - %u, not %u\n",
968                         rpccli_pipe_txt(debug_ctx(), cli),
969                         prhdr->pkt_type,
970                         expected_pkt_type));
971                 return NT_STATUS_INVALID_INFO_CLASS;
972         }
973
974         /* Do this just before return - we don't want to modify any rpc header
975            data before now as we may have needed to do cryptographic actions on
976            it before. */
977
978         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
979                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
980                         "setting fragment first/last ON.\n"));
981                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
982         }
983
984         return NT_STATUS_OK;
985 }
986
987 /****************************************************************************
988  Ensure we eat the just processed pdu from the current_pdu prs_struct.
989  Normally the frag_len and buffer size will match, but on the first trans
990  reply there is a theoretical chance that buffer size > frag_len, so we must
991  deal with that.
992  ****************************************************************************/
993
994 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
995 {
996         uint32 current_pdu_len = prs_data_size(current_pdu);
997
998         if (current_pdu_len < prhdr->frag_len) {
999                 return NT_STATUS_BUFFER_TOO_SMALL;
1000         }
1001
1002         /* Common case. */
1003         if (current_pdu_len == (uint32)prhdr->frag_len) {
1004                 prs_mem_free(current_pdu);
1005                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1006                 /* Make current_pdu dynamic with no memory. */
1007                 prs_give_memory(current_pdu, 0, 0, True);
1008                 return NT_STATUS_OK;
1009         }
1010
1011         /*
1012          * Oh no ! More data in buffer than we processed in current pdu.
1013          * Cheat. Move the data down and shrink the buffer.
1014          */
1015
1016         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1017                         current_pdu_len - prhdr->frag_len);
1018
1019         /* Remember to set the read offset back to zero. */
1020         prs_set_offset(current_pdu, 0);
1021
1022         /* Shrink the buffer. */
1023         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1024                 return NT_STATUS_BUFFER_TOO_SMALL;
1025         }
1026
1027         return NT_STATUS_OK;
1028 }
1029
1030 /****************************************************************************
1031  Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1032 ****************************************************************************/
1033
1034 struct cli_api_pipe_state {
1035         struct event_context *ev;
1036         struct rpc_cli_transport *transport;
1037         uint8_t *rdata;
1038         uint32_t rdata_len;
1039 };
1040
1041 static void cli_api_pipe_trans_done(struct async_req *subreq);
1042 static void cli_api_pipe_write_done(struct async_req *subreq);
1043 static void cli_api_pipe_read_done(struct async_req *subreq);
1044
1045 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1046                                            struct event_context *ev,
1047                                            struct rpc_cli_transport *transport,
1048                                            uint8_t *data, size_t data_len,
1049                                            uint32_t max_rdata_len)
1050 {
1051         struct async_req *result, *subreq;
1052         struct cli_api_pipe_state *state;
1053         NTSTATUS status;
1054
1055         if (!async_req_setup(mem_ctx, &result, &state,
1056                              struct cli_api_pipe_state)) {
1057                 return NULL;
1058         }
1059         state->ev = ev;
1060         state->transport = transport;
1061
1062         if (max_rdata_len < RPC_HEADER_LEN) {
1063                 /*
1064                  * For an RPC reply we always need at least RPC_HEADER_LEN
1065                  * bytes. We check this here because we will receive
1066                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1067                  */
1068                 status = NT_STATUS_INVALID_PARAMETER;
1069                 goto post_status;
1070         }
1071
1072         if (transport->trans_send != NULL) {
1073                 subreq = transport->trans_send(state, ev, data, data_len,
1074                                                max_rdata_len, transport->priv);
1075                 if (subreq == NULL) {
1076                         status = NT_STATUS_NO_MEMORY;
1077                         goto post_status;
1078                 }
1079                 subreq->async.fn = cli_api_pipe_trans_done;
1080                 subreq->async.priv = result;
1081                 return result;
1082         }
1083
1084         /*
1085          * If the transport does not provide a "trans" routine, i.e. for
1086          * example the ncacn_ip_tcp transport, do the write/read step here.
1087          */
1088
1089         subreq = rpc_write_send(state, ev, transport, data, data_len);
1090         if (subreq == NULL) {
1091                 goto fail;
1092         }
1093         subreq->async.fn = cli_api_pipe_write_done;
1094         subreq->async.priv = result;
1095         return result;
1096
1099  post_status:
1100         if (async_post_ntstatus(result, ev, status)) {
1101                 return result;
1102         }
1103  fail:
1104         TALLOC_FREE(result);
1105         return NULL;
1106 }
1107
1108 static void cli_api_pipe_trans_done(struct async_req *subreq)
1109 {
1110         struct async_req *req = talloc_get_type_abort(
1111                 subreq->async.priv, struct async_req);
1112         struct cli_api_pipe_state *state = talloc_get_type_abort(
1113                 req->private_data, struct cli_api_pipe_state);
1114         NTSTATUS status;
1115
1116         status = state->transport->trans_recv(subreq, state, &state->rdata,
1117                                               &state->rdata_len);
1118         TALLOC_FREE(subreq);
1119         if (!NT_STATUS_IS_OK(status)) {
1120                 async_req_nterror(req, status);
1121                 return;
1122         }
1123         async_req_done(req);
1124 }
1125
1126 static void cli_api_pipe_write_done(struct async_req *subreq)
1127 {
1128         struct async_req *req = talloc_get_type_abort(
1129                 subreq->async.priv, struct async_req);
1130         struct cli_api_pipe_state *state = talloc_get_type_abort(
1131                 req->private_data, struct cli_api_pipe_state);
1132         NTSTATUS status;
1133
1134         status = rpc_write_recv(subreq);
1135         TALLOC_FREE(subreq);
1136         if (!NT_STATUS_IS_OK(status)) {
1137                 async_req_nterror(req, status);
1138                 return;
1139         }
1140
1141         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1142         if (async_req_nomem(state->rdata, req)) {
1143                 return;
1144         }
1145
1146         /*
1147          * We don't need to use rpc_read_send here; the upper layer will cope
1148          * with a short read. transport->trans_send could also return less
1149          * than state->max_rdata_len.
1150          */
1151         subreq = state->transport->read_send(state, state->ev, state->rdata,
1152                                              RPC_HEADER_LEN,
1153                                              state->transport->priv);
1154         if (async_req_nomem(subreq, req)) {
1155                 return;
1156         }
1157         subreq->async.fn = cli_api_pipe_read_done;
1158         subreq->async.priv = req;
1159 }
1160
1161 static void cli_api_pipe_read_done(struct async_req *subreq)
1162 {
1163         struct async_req *req = talloc_get_type_abort(
1164                 subreq->async.priv, struct async_req);
1165         struct cli_api_pipe_state *state = talloc_get_type_abort(
1166                 req->private_data, struct cli_api_pipe_state);
1167         NTSTATUS status;
1168         ssize_t received;
1169
1170         status = state->transport->read_recv(subreq, &received);
1171         TALLOC_FREE(subreq);
1172         if (!NT_STATUS_IS_OK(status)) {
1173                 async_req_nterror(req, status);
1174                 return;
1175         }
1176         state->rdata_len = received;
1177         async_req_done(req);
1178 }
1179
1180 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1181                                   uint8_t **prdata, uint32_t *prdata_len)
1182 {
1183         struct cli_api_pipe_state *state = talloc_get_type_abort(
1184                 req->private_data, struct cli_api_pipe_state);
1185         NTSTATUS status;
1186
1187         if (async_req_is_nterror(req, &status)) {
1188                 return status;
1189         }
1190
1191         *prdata = talloc_move(mem_ctx, &state->rdata);
1192         *prdata_len = state->rdata_len;
1193         return NT_STATUS_OK;
1194 }
1195
1196 /****************************************************************************
1197  Send data on an rpc pipe via trans. The prs_struct data must be the last
1198  pdu fragment of an NDR data stream.
1199
1200  Receive response data from an rpc pipe, which may be large...
1201
1202  Read the first fragment: unfortunately we have to use SMBtrans for the
1203  first bit, then SMBreadX for subsequent bits.
1204
1205  If the first fragment received wasn't also the last fragment, continue
1206  getting fragments until we _do_ receive the last fragment.
1207
1208  Request/Response PDUs look like the following...
1209
1210  |<------------------PDU len----------------------------------------------->|
1211  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1212
1213  +------------+-----------------+-------------+---------------+-------------+
1214  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1215  +------------+-----------------+-------------+---------------+-------------+
1216
1217  Where the presence of the AUTH_HDR and AUTH DATA is dependent on the
1218  signing & sealing being negotiated.
1219
1220  ****************************************************************************/
1221
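/*
 * Editorial note restating the layout above as the arithmetic that
 * cli_pipe_validate_current_pdu() applies to a response fragment:
 *
 *   data_len = frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN
 *              - ss_padding_len
 *              - (auth_len ? RPC_HDR_AUTH_LEN + auth_len : 0)
 *
 * where ss_padding_len is the sign/seal padding reported in the auth
 * header and auth_len comes from the RPC header.
 */
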
1222 struct rpc_api_pipe_state {
1223         struct event_context *ev;
1224         struct rpc_pipe_client *cli;
1225         uint8_t expected_pkt_type;
1226
1227         prs_struct incoming_frag;
1228         struct rpc_hdr_info rhdr;
1229
1230         prs_struct incoming_pdu;        /* Incoming reply */
1231         uint32_t incoming_pdu_offset;
1232 };
1233
1234 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1235 {
1236         prs_mem_free(&state->incoming_frag);
1237         prs_mem_free(&state->incoming_pdu);
1238         return 0;
1239 }
1240
1241 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1242 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1243
1244 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1245                                            struct event_context *ev,
1246                                            struct rpc_pipe_client *cli,
1247                                            prs_struct *data, /* Outgoing PDU */
1248                                            uint8_t expected_pkt_type)
1249 {
1250         struct async_req *result, *subreq;
1251         struct rpc_api_pipe_state *state;
1252         uint16_t max_recv_frag;
1253         NTSTATUS status;
1254
1255         if (!async_req_setup(mem_ctx, &result, &state,
1256                              struct rpc_api_pipe_state)) {
1257                 return NULL;
1258         }
1259         state->ev = ev;
1260         state->cli = cli;
1261         state->expected_pkt_type = expected_pkt_type;
1262         state->incoming_pdu_offset = 0;
1263
1264         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1265
1266         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1267         /* Make incoming_pdu dynamic with no memory. */
1268         prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1269
1270         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1271
1272         /*
1273          * Ensure we're not sending too much.
1274          */
1275         if (prs_offset(data) > cli->max_xmit_frag) {
1276                 status = NT_STATUS_INVALID_PARAMETER;
1277                 goto post_status;
1278         }
1279
1280         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1281
1282         max_recv_frag = cli->max_recv_frag;
1283
1284 #ifdef DEVELOPER
1285         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1286 #endif
1287
1288         subreq = cli_api_pipe_send(state, ev, cli->transport,
1289                                    (uint8_t *)prs_data_p(data),
1290                                    prs_offset(data), max_recv_frag);
1291         if (subreq == NULL) {
1292                 status = NT_STATUS_NO_MEMORY;
1293                 goto post_status;
1294         }
1295         subreq->async.fn = rpc_api_pipe_trans_done;
1296         subreq->async.priv = result;
1297         return result;
1298
1299  post_status:
1300         if (async_post_ntstatus(result, ev, status)) {
1301                 return result;
1302         }
1303         TALLOC_FREE(result);
1304         return NULL;
1305 }
1306
1307 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1308 {
1309         struct async_req *req = talloc_get_type_abort(
1310                 subreq->async.priv, struct async_req);
1311         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1312                 req->private_data, struct rpc_api_pipe_state);
1313         NTSTATUS status;
1314         uint8_t *rdata = NULL;
1315         uint32_t rdata_len = 0;
1316         char *rdata_copy;
1317
1318         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1319         TALLOC_FREE(subreq);
1320         if (!NT_STATUS_IS_OK(status)) {
1321                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1322                 async_req_nterror(req, status);
1323                 return;
1324         }
1325
1326         if (rdata == NULL) {
1327                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1328                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1329                 async_req_done(req);
1330                 return;
1331         }
1332
1333         /*
1334          * Give the memory received from cli_trans as dynamic to the current
1335          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1336          * :-(
1337          */
1338         rdata_copy = (char *)memdup(rdata, rdata_len);
1339         TALLOC_FREE(rdata);
1340         if (async_req_nomem(rdata_copy, req)) {
1341                 return;
1342         }
1343         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1344
1345         /* Ensure we have enough data for a pdu. */
1346         subreq = get_complete_frag_send(state, state->ev, state->cli,
1347                                         &state->rhdr, &state->incoming_frag);
1348         if (async_req_nomem(subreq, req)) {
1349                 return;
1350         }
1351         subreq->async.fn = rpc_api_pipe_got_pdu;
1352         subreq->async.priv = req;
1353 }
1354
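/*
 * Called once get_complete_frag has assembled a whole fragment in
 * state->incoming_frag. Validates the fragment, copies its data portion
 * into state->incoming_pdu and, unless the fragment carries RPC_FLG_LAST,
 * schedules the receive of the next fragment.
 */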
1355 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1356 {
1357         struct async_req *req = talloc_get_type_abort(
1358                 subreq->async.priv, struct async_req);
1359         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1360                 req->private_data, struct rpc_api_pipe_state);
1361         NTSTATUS status;
1362         char *rdata = NULL;
1363         uint32_t rdata_len = 0;
1364
1365         status = get_complete_frag_recv(subreq);
1366         TALLOC_FREE(subreq);
1367         if (!NT_STATUS_IS_OK(status)) {
1368                 DEBUG(5, ("get_complete_frag failed: %s\n",
1369                           nt_errstr(status)));
1370                 async_req_nterror(req, status);
1371                 return;
1372         }
1373
1374         status = cli_pipe_validate_current_pdu(
1375                 state->cli, &state->rhdr, &state->incoming_frag,
1376                 state->expected_pkt_type, &rdata, &rdata_len,
1377                 &state->incoming_pdu);
1378
1379         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1380                   (unsigned)prs_data_size(&state->incoming_frag),
1381                   (unsigned)state->incoming_pdu_offset,
1382                   nt_errstr(status)));
1383
1384         if (!NT_STATUS_IS_OK(status)) {
1385                 async_req_nterror(req, status);
1386                 return;
1387         }
1388
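        /*
         * pack_type[] carries the DCE/RPC data representation ("drep")
         * octets from the header. The high nibble of the first octet
         * encodes the integer byte order (0 = big-endian, 1 = little-endian),
         * so a zero first octet on the first fragment means the server is
         * sending big-endian data.
         */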
1389         if ((state->rhdr.flags & RPC_FLG_FIRST)
1390             && (state->rhdr.pack_type[0] == 0)) {
1391                 /*
1392                  * Set the data type correctly for big-endian data on the
1393                  * first packet.
1394                  */
1395                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1396                           "big-endian.\n",
1397                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1398                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1399         }
1400         /*
1401          * Check endianness on subsequent packets.
1402          */
1403         if (state->incoming_frag.bigendian_data
1404             != state->incoming_pdu.bigendian_data) {
1405                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1406                          "%s\n",
1407                          state->incoming_pdu.bigendian_data?"big":"little",
1408                          state->incoming_frag.bigendian_data?"big":"little"));
1409                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1410                 return;
1411         }
1412
1413         /* Now copy the data portion out of the pdu into rbuf. */
1414         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1415                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1416                 return;
1417         }
1418
1419         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1420                rdata, (size_t)rdata_len);
1421         state->incoming_pdu_offset += rdata_len;
1422
1423         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1424                                             &state->incoming_frag);
1425         if (!NT_STATUS_IS_OK(status)) {
1426                 async_req_nterror(req, status);
1427                 return;
1428         }
1429
1430         if (state->rhdr.flags & RPC_FLG_LAST) {
1431                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1432                           rpccli_pipe_txt(debug_ctx(), state->cli),
1433                           (unsigned)prs_data_size(&state->incoming_pdu)));
1434                 async_req_done(req);
1435                 return;
1436         }
1437
1438         subreq = get_complete_frag_send(state, state->ev, state->cli,
1439                                         &state->rhdr, &state->incoming_frag);
1440         if (async_req_nomem(subreq, req)) {
1441                 return;
1442         }
1443         subreq->async.fn = rpc_api_pipe_got_pdu;
1444         subreq->async.priv = req;
1445 }
1446
1447 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1448                                   prs_struct *reply_pdu)
1449 {
1450         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1451                 req->private_data, struct rpc_api_pipe_state);
1452         NTSTATUS status;
1453
1454         if (async_req_is_nterror(req, &status)) {
1455                 return status;
1456         }
1457
1458         *reply_pdu = state->incoming_pdu;
1459         reply_pdu->mem_ctx = mem_ctx;
1460
1461         /*
1462          * Prevent state->incoming_pdu from being freed in
1463          * rpc_api_pipe_state_destructor()
1464          */
1465         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1466
1467         return NT_STATUS_OK;
1468 }
1469
1470 /*******************************************************************
1471  Creates krb5 auth bind.
1472  ********************************************************************/
1473
1474 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1475                                                 enum pipe_auth_level auth_level,
1476                                                 RPC_HDR_AUTH *pauth_out,
1477                                                 prs_struct *auth_data)
1478 {
1479 #ifdef HAVE_KRB5
1480         int ret;
1481         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1482         DATA_BLOB tkt = data_blob_null;
1483         DATA_BLOB tkt_wrapped = data_blob_null;
1484
1485         /* We may change the pad length before marshalling. */
1486         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1487
1488         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1489                 a->service_principal ));
1490
1491         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1492
1493         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1494                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1495
1496         if (ret) {
1497                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1498                         "failed with %s\n",
1499                         a->service_principal,
1500                         error_message(ret) ));
1501
1502                 data_blob_free(&tkt);
1503                 prs_mem_free(auth_data);
1504                 return NT_STATUS_INVALID_PARAMETER;
1505         }
1506
1507         /* wrap that up in a nice GSS-API wrapping */
1508         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1509
1510         data_blob_free(&tkt);
1511
1512         /* Auth len in the rpc header doesn't include auth_header. */
1513         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1514                 data_blob_free(&tkt_wrapped);
1515                 prs_mem_free(auth_data);
1516                 return NT_STATUS_NO_MEMORY;
1517         }
1518
1519         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1520         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1521
1522         data_blob_free(&tkt_wrapped);
1523         return NT_STATUS_OK;
1524 #else
1525         return NT_STATUS_INVALID_PARAMETER;
1526 #endif
1527 }
1528
1529 /*******************************************************************
1530  Creates SPNEGO NTLMSSP auth bind.
1531  ********************************************************************/
1532
1533 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1534                                                 enum pipe_auth_level auth_level,
1535                                                 RPC_HDR_AUTH *pauth_out,
1536                                                 prs_struct *auth_data)
1537 {
1538         NTSTATUS nt_status;
1539         DATA_BLOB null_blob = data_blob_null;
1540         DATA_BLOB request = data_blob_null;
1541         DATA_BLOB spnego_msg = data_blob_null;
1542
1543         /* We may change the pad length before marshalling. */
1544         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1545
1546         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1547         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1548                                         null_blob,
1549                                         &request);
1550
1551         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1552                 data_blob_free(&request);
1553                 prs_mem_free(auth_data);
1554                 return nt_status;
1555         }
1556
1557         /* Wrap this in SPNEGO. */
1558         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1559
1560         data_blob_free(&request);
1561
1562         /* Auth len in the rpc header doesn't include auth_header. */
1563         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1564                 data_blob_free(&spnego_msg);
1565                 prs_mem_free(auth_data);
1566                 return NT_STATUS_NO_MEMORY;
1567         }
1568
1569         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1570         dump_data(5, spnego_msg.data, spnego_msg.length);
1571
1572         data_blob_free(&spnego_msg);
1573         return NT_STATUS_OK;
1574 }
1575
1576 /*******************************************************************
1577  Creates NTLMSSP auth bind.
1578  ********************************************************************/
1579
1580 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1581                                                 enum pipe_auth_level auth_level,
1582                                                 RPC_HDR_AUTH *pauth_out,
1583                                                 prs_struct *auth_data)
1584 {
1585         NTSTATUS nt_status;
1586         DATA_BLOB null_blob = data_blob_null;
1587         DATA_BLOB request = data_blob_null;
1588
1589         /* We may change the pad length before marshalling. */
1590         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1591
1592         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1593         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1594                                         null_blob,
1595                                         &request);
1596
1597         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1598                 data_blob_free(&request);
1599                 prs_mem_free(auth_data);
1600                 return nt_status;
1601         }
1602
1603         /* Auth len in the rpc header doesn't include auth_header. */
1604         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1605                 data_blob_free(&request);
1606                 prs_mem_free(auth_data);
1607                 return NT_STATUS_NO_MEMORY;
1608         }
1609
1610         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1611         dump_data(5, request.data, request.length);
1612
1613         data_blob_free(&request);
1614         return NT_STATUS_OK;
1615 }
1616
1617 /*******************************************************************
1618  Creates schannel auth bind.
1619  ********************************************************************/
1620
1621 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1622                                                 enum pipe_auth_level auth_level,
1623                                                 RPC_HDR_AUTH *pauth_out,
1624                                                 prs_struct *auth_data)
1625 {
1626         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1627
1628         /* We may change the pad length before marshalling. */
1629         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1630
1631         /* Use lp_workgroup() if domain not specified */
1632
1633         if (!cli->auth->domain || !cli->auth->domain[0]) {
1634                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1635                 if (cli->auth->domain == NULL) {
1636                         return NT_STATUS_NO_MEMORY;
1637                 }
1638         }
1639
1640         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1641                                    global_myname());
1642
1643         /*
1644          * Now marshall the data into the auth parse_struct.
1645          */
1646
1647         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1648                                        &schannel_neg, auth_data, 0)) {
1649                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1650                 prs_mem_free(auth_data);
1651                 return NT_STATUS_NO_MEMORY;
1652         }
1653
1654         return NT_STATUS_OK;
1655 }
1656
1657 /*******************************************************************
1658  Creates the internals of a DCE/RPC bind request or alter context PDU.
1659  ********************************************************************/
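/*
 * The resulting PDU is laid out as:
 *
 *   RPC_HDR | RPC_HDR_RB (one presentation context) |
 *   [zero padding to an 8 byte boundary] | RPC_HDR_AUTH | auth data
 *
 * where the padding, auth header and auth data are only present when
 * pauth_info actually contains data. frag_len in RPC_HDR covers the
 * whole PDU.
 */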
1660
1661 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1662                                                 prs_struct *rpc_out, 
1663                                                 uint32 rpc_call_id,
1664                                                 const RPC_IFACE *abstract,
1665                                                 const RPC_IFACE *transfer,
1666                                                 RPC_HDR_AUTH *phdr_auth,
1667                                                 prs_struct *pauth_info)
1668 {
1669         RPC_HDR hdr;
1670         RPC_HDR_RB hdr_rb;
1671         RPC_CONTEXT rpc_ctx;
1672         uint16 auth_len = prs_offset(pauth_info);
1673         uint8 ss_padding_len = 0;
1674         uint16 frag_len = 0;
1675
1676         /* create the RPC context. */
1677         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1678
1679         /* create the bind request RPC_HDR_RB */
1680         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1681
1682         /* Start building the frag length. */
1683         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1684
1685         /* Do we need to pad ? */
1686         if (auth_len) {
1687                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1688                 if (data_len % 8) {
1689                         ss_padding_len = 8 - (data_len % 8);
1690                         phdr_auth->auth_pad_len = ss_padding_len;
1691                 }
1692                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1693         }
1694
1695         /* Create the request RPC_HDR */
1696         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1697
1698         /* Marshall the RPC header */
1699         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1700                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1701                 return NT_STATUS_NO_MEMORY;
1702         }
1703
1704         /* Marshall the bind request data */
1705         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1706                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1707                 return NT_STATUS_NO_MEMORY;
1708         }
1709
1710         /*
1711          * Grow the outgoing buffer to store any auth info.
1712          */
1713
1714         if(auth_len != 0) {
1715                 if (ss_padding_len) {
1716                         char pad[8];
1717                         memset(pad, '\0', 8);
1718                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1719                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1720                                 return NT_STATUS_NO_MEMORY;
1721                         }
1722                 }
1723
1724                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1725                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1726                         return NT_STATUS_NO_MEMORY;
1727                 }
1728
1729
1730                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1731                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1732                         return NT_STATUS_NO_MEMORY;
1733                 }
1734         }
1735
1736         return NT_STATUS_OK;
1737 }
1738
1739 /*******************************************************************
1740  Creates a DCE/RPC bind request.
1741  ********************************************************************/
1742
1743 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1744                                 prs_struct *rpc_out, 
1745                                 uint32 rpc_call_id,
1746                                 const RPC_IFACE *abstract,
1747                                 const RPC_IFACE *transfer,
1748                                 enum pipe_auth_type auth_type,
1749                                 enum pipe_auth_level auth_level)
1750 {
1751         RPC_HDR_AUTH hdr_auth;
1752         prs_struct auth_info;
1753         NTSTATUS ret = NT_STATUS_OK;
1754
1755         ZERO_STRUCT(hdr_auth);
1756         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1757                 return NT_STATUS_NO_MEMORY;
1758
1759         switch (auth_type) {
1760                 case PIPE_AUTH_TYPE_SCHANNEL:
1761                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1762                         if (!NT_STATUS_IS_OK(ret)) {
1763                                 prs_mem_free(&auth_info);
1764                                 return ret;
1765                         }
1766                         break;
1767
1768                 case PIPE_AUTH_TYPE_NTLMSSP:
1769                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1770                         if (!NT_STATUS_IS_OK(ret)) {
1771                                 prs_mem_free(&auth_info);
1772                                 return ret;
1773                         }
1774                         break;
1775
1776                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1777                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1778                         if (!NT_STATUS_IS_OK(ret)) {
1779                                 prs_mem_free(&auth_info);
1780                                 return ret;
1781                         }
1782                         break;
1783
1784                 case PIPE_AUTH_TYPE_KRB5:
1785                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1786                         if (!NT_STATUS_IS_OK(ret)) {
1787                                 prs_mem_free(&auth_info);
1788                                 return ret;
1789                         }
1790                         break;
1791
1792                 case PIPE_AUTH_TYPE_NONE:
1793                         break;
1794
1795                 default:
1796                         /* "Can't" happen. */
1797                         return NT_STATUS_INVALID_INFO_CLASS;
1798         }
1799
1800         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1801                                                 rpc_out, 
1802                                                 rpc_call_id,
1803                                                 abstract,
1804                                                 transfer,
1805                                                 &hdr_auth,
1806                                                 &auth_info);
1807
1808         prs_mem_free(&auth_info);
1809         return ret;
1810 }
1811
1812 /*******************************************************************
1813  Create and add the NTLMSSP sign/seal auth header and data.
1814  ********************************************************************/
1815
1816 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1817                                         RPC_HDR *phdr,
1818                                         uint32 ss_padding_len,
1819                                         prs_struct *outgoing_pdu)
1820 {
1821         RPC_HDR_AUTH auth_info;
1822         NTSTATUS status;
1823         DATA_BLOB auth_blob = data_blob_null;
1824         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1825
1826         if (!cli->auth->a_u.ntlmssp_state) {
1827                 return NT_STATUS_INVALID_PARAMETER;
1828         }
1829
1830         /* Init and marshall the auth header. */
1831         init_rpc_hdr_auth(&auth_info,
1832                         map_pipe_auth_type_to_rpc_auth_type(
1833                                 cli->auth->auth_type),
1834                         cli->auth->auth_level,
1835                         ss_padding_len,
1836                         1 /* context id. */);
1837
1838         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1839                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1840                 data_blob_free(&auth_blob);
1841                 return NT_STATUS_NO_MEMORY;
1842         }
1843
1844         switch (cli->auth->auth_level) {
1845                 case PIPE_AUTH_LEVEL_PRIVACY:
1846                         /* Data portion is encrypted. */
1847                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1848                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1849                                         data_and_pad_len,
1850                                         (unsigned char *)prs_data_p(outgoing_pdu),
1851                                         (size_t)prs_offset(outgoing_pdu),
1852                                         &auth_blob);
1853                         if (!NT_STATUS_IS_OK(status)) {
1854                                 data_blob_free(&auth_blob);
1855                                 return status;
1856                         }
1857                         break;
1858
1859                 case PIPE_AUTH_LEVEL_INTEGRITY:
1860                         /* Data is signed. */
1861                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1862                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1863                                         data_and_pad_len,
1864                                         (unsigned char *)prs_data_p(outgoing_pdu),
1865                                         (size_t)prs_offset(outgoing_pdu),
1866                                         &auth_blob);
1867                         if (!NT_STATUS_IS_OK(status)) {
1868                                 data_blob_free(&auth_blob);
1869                                 return status;
1870                         }
1871                         break;
1872
1873                 default:
1874                         /* Can't happen. */
1875                         smb_panic("bad auth level");
1876                         /* Notreached. */
1877                         return NT_STATUS_INVALID_PARAMETER;
1878         }
1879
1880         /* Finally marshall the blob. */
1881
1882         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1883                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1884                         (unsigned int)NTLMSSP_SIG_SIZE));
1885                 data_blob_free(&auth_blob);
1886                 return NT_STATUS_NO_MEMORY;
1887         }
1888
1889         data_blob_free(&auth_blob);
1890         return NT_STATUS_OK;
1891 }
1892
1893 /*******************************************************************
1894  Create and add the schannel sign/seal auth header and data.
1895  ********************************************************************/
1896
1897 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1898                                         RPC_HDR *phdr,
1899                                         uint32 ss_padding_len,
1900                                         prs_struct *outgoing_pdu)
1901 {
1902         RPC_HDR_AUTH auth_info;
1903         RPC_AUTH_SCHANNEL_CHK verf;
1904         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1905         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1906         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1907
1908         if (!sas) {
1909                 return NT_STATUS_INVALID_PARAMETER;
1910         }
1911
1912         /* Init and marshall the auth header. */
1913         init_rpc_hdr_auth(&auth_info,
1914                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1915                         cli->auth->auth_level,
1916                         ss_padding_len,
1917                         1 /* context id. */);
1918
1919         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1920                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1921                 return NT_STATUS_NO_MEMORY;
1922         }
1923
1924         switch (cli->auth->auth_level) {
1925                 case PIPE_AUTH_LEVEL_PRIVACY:
1926                 case PIPE_AUTH_LEVEL_INTEGRITY:
1927                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1928                                 sas->seq_num));
1929
1930                         schannel_encode(sas,
1931                                         cli->auth->auth_level,
1932                                         SENDER_IS_INITIATOR,
1933                                         &verf,
1934                                         data_p,
1935                                         data_and_pad_len);
1936
1937                         sas->seq_num++;
1938                         break;
1939
1940                 default:
1941                         /* Can't happen. */
1942                         smb_panic("bad auth level");
1943                         /* Notreached. */
1944                         return NT_STATUS_INVALID_PARAMETER;
1945         }
1946
1947         /* Finally marshall the blob. */
1948         smb_io_rpc_auth_schannel_chk("",
1949                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1950                         &verf,
1951                         outgoing_pdu,
1952                         0);
1953
1954         return NT_STATUS_OK;
1955 }
1956
1957 /*******************************************************************
1958  Calculate how much data we're going to send in this packet, also
1959  work out any sign/seal padding length.
1960  ********************************************************************/
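/*
 * Worked example (illustrative only, assuming the usual sizes
 * RPC_HEADER_LEN = 16, RPC_HDR_REQ_LEN = 8, RPC_HDR_AUTH_LEN = 8 and
 * NTLMSSP_SIG_SIZE = 16): with max_xmit_frag = 4280 and NTLMSSP
 * sign/seal, data_space = 4280 - 16 - 8 - 8 - 16 = 4232. A request with
 * more data left than that sends data_len = 4232, needs no sign/seal
 * padding (4232 % 8 == 0) and yields
 * frag_len = 16 + 8 + 4232 + 0 + 8 + 16 = 4280.
 */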
1961
1962 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1963                                         uint32 data_left,
1964                                         uint16 *p_frag_len,
1965                                         uint16 *p_auth_len,
1966                                         uint32 *p_ss_padding)
1967 {
1968         uint32 data_space, data_len;
1969
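        /*
         * In DEVELOPER builds, randomly pretend less data is left so that
         * requests are split into more fragments than strictly necessary,
         * exercising the multi-fragment send path.
         */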
1970 #ifdef DEVELOPER
1971         if ((data_left > 0) && (sys_random() % 2)) {
1972                 data_left = MAX(data_left/2, 1);
1973         }
1974 #endif
1975
1976         switch (cli->auth->auth_level) {
1977                 case PIPE_AUTH_LEVEL_NONE:
1978                 case PIPE_AUTH_LEVEL_CONNECT:
1979                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1980                         data_len = MIN(data_space, data_left);
1981                         *p_ss_padding = 0;
1982                         *p_auth_len = 0;
1983                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1984                         return data_len;
1985
1986                 case PIPE_AUTH_LEVEL_INTEGRITY:
1987                 case PIPE_AUTH_LEVEL_PRIVACY:
1988                         /* Treat the same for all authenticated rpc requests. */
1989                         switch(cli->auth->auth_type) {
1990                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1991                                 case PIPE_AUTH_TYPE_NTLMSSP:
1992                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1993                                         break;
1994                                 case PIPE_AUTH_TYPE_SCHANNEL:
1995                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1996                                         break;
1997                                 default:
1998                                         smb_panic("bad auth type");
1999                                         break;
2000                         }
2001
2002                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2003                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2004
2005                         data_len = MIN(data_space, data_left);
2006                         *p_ss_padding = 0;
2007                         if (data_len % 8) {
2008                                 *p_ss_padding = 8 - (data_len % 8);
2009                         }
2010                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2011                                         data_len + *p_ss_padding +              /* data plus padding. */
2012                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2013                         return data_len;
2014
2015                 default:
2016                         smb_panic("bad auth level");
2017                         /* Notreached. */
2018                         return 0;
2019         }
2020 }
2021
2022 /*******************************************************************
2023  External interface.
2024  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2025  Reply is NDR encoded in out_data. Splits the data stream into RPC PDUs
2026  and deals with signing/sealing details.
2027  ********************************************************************/
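/*
 * Illustrative use of the async interface (the synchronous
 * rpc_api_pipe_req() wrapper further down follows the same pattern):
 *
 *   req = rpc_api_pipe_req_send(mem_ctx, ev, cli, op_num, &in_data);
 *   while (req->state < ASYNC_REQ_DONE) {
 *           event_loop_once(ev);
 *   }
 *   status = rpc_api_pipe_req_recv(req, mem_ctx, &out_data);
 */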
2028
2029 struct rpc_api_pipe_req_state {
2030         struct event_context *ev;
2031         struct rpc_pipe_client *cli;
2032         uint8_t op_num;
2033         uint32_t call_id;
2034         prs_struct *req_data;
2035         uint32_t req_data_sent;
2036         prs_struct outgoing_frag;
2037         prs_struct reply_pdu;
2038 };
2039
2040 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2041 {
2042         prs_mem_free(&s->outgoing_frag);
2043         prs_mem_free(&s->reply_pdu);
2044         return 0;
2045 }
2046
2047 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2048 static void rpc_api_pipe_req_done(struct async_req *subreq);
2049 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2050                                   bool *is_last_frag);
2051
2052 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2053                                         struct event_context *ev,
2054                                         struct rpc_pipe_client *cli,
2055                                         uint8_t op_num,
2056                                         prs_struct *req_data)
2057 {
2058         struct async_req *result, *subreq;
2059         struct rpc_api_pipe_req_state *state;
2060         NTSTATUS status;
2061         bool is_last_frag;
2062
2063         if (!async_req_setup(mem_ctx, &result, &state,
2064                              struct rpc_api_pipe_req_state)) {
2065                 return NULL;
2066         }
2067         state->ev = ev;
2068         state->cli = cli;
2069         state->op_num = op_num;
2070         state->req_data = req_data;
2071         state->req_data_sent = 0;
2072         state->call_id = get_rpc_call_id();
2073
2074         if (cli->max_xmit_frag
2075             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2076                 /* Server is screwed up ! */
2077                 status = NT_STATUS_INVALID_PARAMETER;
2078                 goto post_status;
2079         }
2080
2081         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2082
2083         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2084                       state, MARSHALL)) {
2085                 status = NT_STATUS_NO_MEMORY;
2086                 goto post_status;
2087         }
2088
2089         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2090
2091         status = prepare_next_frag(state, &is_last_frag);
2092         if (!NT_STATUS_IS_OK(status)) {
2093                 goto post_status;
2094         }
2095
2096         if (is_last_frag) {
2097                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2098                                            &state->outgoing_frag,
2099                                            RPC_RESPONSE);
2100                 if (subreq == NULL) {
2101                         status = NT_STATUS_NO_MEMORY;
2102                         goto post_status;
2103                 }
2104                 subreq->async.fn = rpc_api_pipe_req_done;
2105                 subreq->async.priv = result;
2106         } else {
2107                 subreq = rpc_write_send(
2108                         state, ev, cli->transport,
2109                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2110                         prs_offset(&state->outgoing_frag));
2111                 if (subreq == NULL) {
2112                         status = NT_STATUS_NO_MEMORY;
2113                         goto post_status;
2114                 }
2115                 subreq->async.fn = rpc_api_pipe_req_write_done;
2116                 subreq->async.priv = result;
2117         }
2118         return result;
2119
2120  post_status:
2121         if (async_post_ntstatus(result, ev, status)) {
2122                 return result;
2123         }
2124         TALLOC_FREE(result);
2125         return NULL;
2126 }
2127
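/*
 * Marshall the next request fragment into state->outgoing_frag:
 * RPC_HDR, RPC_HDR_REQ, as much request data as fits, any sign/seal
 * padding and, for authenticated connections, the auth footer.
 * Sets *is_last_frag when the fragment carries RPC_FLG_LAST.
 */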
2128 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2129                                   bool *is_last_frag)
2130 {
2131         RPC_HDR hdr;
2132         RPC_HDR_REQ hdr_req;
2133         uint32_t data_sent_thistime;
2134         uint16_t auth_len;
2135         uint16_t frag_len;
2136         uint8_t flags = 0;
2137         uint32_t ss_padding;
2138         uint32_t data_left;
2139         char pad[8] = { 0, };
2140         NTSTATUS status;
2141
2142         data_left = prs_offset(state->req_data) - state->req_data_sent;
2143
2144         data_sent_thistime = calculate_data_len_tosend(
2145                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2146
2147         if (state->req_data_sent == 0) {
2148                 flags = RPC_FLG_FIRST;
2149         }
2150
2151         if (data_sent_thistime == data_left) {
2152                 flags |= RPC_FLG_LAST;
2153         }
2154
2155         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2156                 return NT_STATUS_NO_MEMORY;
2157         }
2158
2159         /* Create and marshall the header and request header. */
2160         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2161                      auth_len);
2162
2163         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2164                 return NT_STATUS_NO_MEMORY;
2165         }
2166
2167         /* Create the rpc request RPC_HDR_REQ */
2168         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2169                          state->op_num);
2170
2171         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2172                                 &state->outgoing_frag, 0)) {
2173                 return NT_STATUS_NO_MEMORY;
2174         }
2175
2176         /* Copy in the data, plus any ss padding. */
2177         if (!prs_append_some_prs_data(&state->outgoing_frag,
2178                                       state->req_data, state->req_data_sent,
2179                                       data_sent_thistime)) {
2180                 return NT_STATUS_NO_MEMORY;
2181         }
2182
2183         /* Copy the sign/seal padding data. */
2184         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2185                 return NT_STATUS_NO_MEMORY;
2186         }
2187
2188         /* Generate any auth sign/seal and add the auth footer. */
2189         switch (state->cli->auth->auth_type) {
2190         case PIPE_AUTH_TYPE_NONE:
2191                 status = NT_STATUS_OK;
2192                 break;
2193         case PIPE_AUTH_TYPE_NTLMSSP:
2194         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2195                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2196                                                  &state->outgoing_frag);
2197                 break;
2198         case PIPE_AUTH_TYPE_SCHANNEL:
2199                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2200                                                   &state->outgoing_frag);
2201                 break;
2202         default:
2203                 status = NT_STATUS_INVALID_PARAMETER;
2204                 break;
2205         }
2206
2207         state->req_data_sent += data_sent_thistime;
2208         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2209
2210         return status;
2211 }
2212
2213 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2214 {
2215         struct async_req *req = talloc_get_type_abort(
2216                 subreq->async.priv, struct async_req);
2217         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2218                 req->private_data, struct rpc_api_pipe_req_state);
2219         NTSTATUS status;
2220         bool is_last_frag;
2221
2222         status = rpc_write_recv(subreq);
2223         TALLOC_FREE(subreq);
2224         if (!NT_STATUS_IS_OK(status)) {
2225                 async_req_nterror(req, status);
2226                 return;
2227         }
2228
2229         status = prepare_next_frag(state, &is_last_frag);
2230         if (!NT_STATUS_IS_OK(status)) {
2231                 async_req_nterror(req, status);
2232                 return;
2233         }
2234
2235         if (is_last_frag) {
2236                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2237                                            &state->outgoing_frag,
2238                                            RPC_RESPONSE);
2239                 if (async_req_nomem(subreq, req)) {
2240                         return;
2241                 }
2242                 subreq->async.fn = rpc_api_pipe_req_done;
2243                 subreq->async.priv = req;
2244         } else {
2245                 subreq = rpc_write_send(
2246                         state, state->ev,
2247                         state->cli->transport,
2248                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2249                         prs_offset(&state->outgoing_frag));
2250                 if (async_req_nomem(subreq, req)) {
2251                         return;
2252                 }
2253                 subreq->async.fn = rpc_api_pipe_req_write_done;
2254                 subreq->async.priv = req;
2255         }
2256 }
2257
2258 static void rpc_api_pipe_req_done(struct async_req *subreq)
2259 {
2260         struct async_req *req = talloc_get_type_abort(
2261                 subreq->async.priv, struct async_req);
2262         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2263                 req->private_data, struct rpc_api_pipe_req_state);
2264         NTSTATUS status;
2265
2266         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2267         TALLOC_FREE(subreq);
2268         if (!NT_STATUS_IS_OK(status)) {
2269                 async_req_nterror(req, status);
2270                 return;
2271         }
2272         async_req_done(req);
2273 }
2274
2275 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2276                                prs_struct *reply_pdu)
2277 {
2278         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2279                 req->private_data, struct rpc_api_pipe_req_state);
2280         NTSTATUS status;
2281
2282         if (async_req_is_nterror(req, &status)) {
2283                 /*
2284                  * We always have to initialize to reply pdu, even if there is
2285                  * none. The rpccli_* caller routines expect this.
2286                  */
2287                 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2288                 return status;
2289         }
2290
2291         *reply_pdu = state->reply_pdu;
2292         reply_pdu->mem_ctx = mem_ctx;
2293
2294         /*
2295          * Prevent state->req_pdu from being freed in
2296          * rpc_api_pipe_req_state_destructor()
2297          */
2298         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2299
2300         return NT_STATUS_OK;
2301 }
2302
2303 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2304                         uint8 op_num,
2305                         prs_struct *in_data,
2306                         prs_struct *out_data)
2307 {
2308         TALLOC_CTX *frame = talloc_stackframe();
2309         struct event_context *ev;
2310         struct async_req *req;
2311         NTSTATUS status = NT_STATUS_NO_MEMORY;
2312
2313         ev = event_context_init(frame);
2314         if (ev == NULL) {
2315                 goto fail;
2316         }
2317
2318         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2319         if (req == NULL) {
2320                 goto fail;
2321         }
2322
2323         while (req->state < ASYNC_REQ_DONE) {
2324                 event_loop_once(ev);
2325         }
2326
2327         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2328  fail:
2329         TALLOC_FREE(frame);
2330         return status;
2331 }
2332
2333 #if 0
2334 /****************************************************************************
2335  Set the handle state.
2336 ****************************************************************************/
2337
2338 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2339                                    const char *pipe_name, uint16 device_state)
2340 {
2341         bool state_set = False;
2342         char param[2];
2343         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2344         char *rparam = NULL;
2345         char *rdata = NULL;
2346         uint32 rparam_len, rdata_len;
2347
2348         if (pipe_name == NULL)
2349                 return False;
2350
2351         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2352                  cli->fnum, pipe_name, device_state));
2353
2354         /* create parameters: device state */
2355         SSVAL(param, 0, device_state);
2356
2357         /* create setup parameters. */
2358         setup[0] = 0x0001; 
2359         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2360
2361         /* send the data on \PIPE\ */
2362         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2363                     setup, 2, 0,                /* setup, length, max */
2364                     param, 2, 0,                /* param, length, max */
2365                     NULL, 0, 1024,              /* data, length, max */
2366                     &rparam, &rparam_len,        /* return param, length */
2367                     &rdata, &rdata_len))         /* return data, length */
2368         {
2369                 DEBUG(5, ("Set Handle state: return OK\n"));
2370                 state_set = True;
2371         }
2372
2373         SAFE_FREE(rparam);
2374         SAFE_FREE(rdata);
2375
2376         return state_set;
2377 }
2378 #endif
2379
2380 /****************************************************************************
2381  Check the rpc bind acknowledge response.
2382 ****************************************************************************/
2383
2384 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2385 {
2386         if ( hdr_ba->addr.len == 0) {
2387                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2388         }
2389
2390         /* check the transfer syntax */
2391         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2392              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2393                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2394                 return False;
2395         }
2396
2397         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2398                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2399                           hdr_ba->res.num_results, hdr_ba->res.reason));
2400         }
2401
2402         DEBUG(5,("check_bind_response: accepted!\n"));
2403         return True;
2404 }
2405
2406 /*******************************************************************
2407  Creates a DCE/RPC bind authentication response.
2408  This is the packet that is sent back to the server once we
2409  have received a BIND-ACK, to finish the third leg of
2410  the authentication handshake.
2411  ********************************************************************/
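/*
 * For NTLMSSP the three legs are: BIND carrying the NTLMSSP NEGOTIATE,
 * BIND-ACK carrying the server's CHALLENGE, and this AUTH3 PDU carrying
 * the client's AUTHENTICATE token, to which no reply is expected.
 */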
2412
2413 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2414                                 uint32 rpc_call_id,
2415                                 enum pipe_auth_type auth_type,
2416                                 enum pipe_auth_level auth_level,
2417                                 DATA_BLOB *pauth_blob,
2418                                 prs_struct *rpc_out)
2419 {
2420         RPC_HDR hdr;
2421         RPC_HDR_AUTH hdr_auth;
2422         uint32 pad = 0;
2423
2424         /* Create the request RPC_HDR */
2425         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2426                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2427                      pauth_blob->length );
2428
2429         /* Marshall it. */
2430         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2431                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2432                 return NT_STATUS_NO_MEMORY;
2433         }
2434
2435         /*
2436                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2437                 about padding - shouldn't this pad to length 8 ? JRA.
2438         */
2439
2440         /* 4 bytes padding. */
2441         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2442                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2443                 return NT_STATUS_NO_MEMORY;
2444         }
2445
2446         /* Create the request RPC_HDR_AUTH */
2447         init_rpc_hdr_auth(&hdr_auth,
2448                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2449                         auth_level, 0, 1);
2450
2451         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2452                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2453                 return NT_STATUS_NO_MEMORY;
2454         }
2455
2456         /*
2457          * Append the auth data to the outgoing buffer.
2458          */
2459
2460         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2461                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2462                 return NT_STATUS_NO_MEMORY;
2463         }
2464
2465         return NT_STATUS_OK;
2466 }
2467
2468 /*******************************************************************
2469  Creates a DCE/RPC bind alter context authentication request which
2470  may contain a spnego auth blob.
2471  ********************************************************************/
2472
2473 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2474                                         const RPC_IFACE *abstract,
2475                                         const RPC_IFACE *transfer,
2476                                         enum pipe_auth_level auth_level,
2477                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2478                                         prs_struct *rpc_out)
2479 {
2480         RPC_HDR_AUTH hdr_auth;
2481         prs_struct auth_info;
2482         NTSTATUS ret = NT_STATUS_OK;
2483
2484         ZERO_STRUCT(hdr_auth);
2485         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2486                 return NT_STATUS_NO_MEMORY;
2487
2488         /* We may change the pad length before marshalling. */
2489         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2490
2491         if (pauth_blob->length) {
2492                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2493                         prs_mem_free(&auth_info);
2494                         return NT_STATUS_NO_MEMORY;
2495                 }
2496         }
2497
2498         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2499                                                 rpc_out, 
2500                                                 rpc_call_id,
2501                                                 abstract,
2502                                                 transfer,
2503                                                 &hdr_auth,
2504                                                 &auth_info);
2505         prs_mem_free(&auth_info);
2506         return ret;
2507 }
2508
2509 /****************************************************************************
2510  Do an rpc bind.
2511 ****************************************************************************/
2512
2513 struct rpc_pipe_bind_state {
2514         struct event_context *ev;
2515         struct rpc_pipe_client *cli;
2516         prs_struct rpc_out;
2517         uint32_t rpc_call_id;
2518 };
2519
2520 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2521 {
2522         prs_mem_free(&state->rpc_out);
2523         return 0;
2524 }
2525
2526 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2527 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2528                                            struct rpc_pipe_bind_state *state,
2529                                            struct rpc_hdr_info *phdr,
2530                                            prs_struct *reply_pdu);
2531 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2532 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2533                                                     struct rpc_pipe_bind_state *state,
2534                                                     struct rpc_hdr_info *phdr,
2535                                                     prs_struct *reply_pdu);
2536 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2537
2538 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2539                                      struct event_context *ev,
2540                                      struct rpc_pipe_client *cli,
2541                                      struct cli_pipe_auth_data *auth)
2542 {
2543         struct async_req *result, *subreq;
2544         struct rpc_pipe_bind_state *state;
2545         NTSTATUS status;
2546
2547         if (!async_req_setup(mem_ctx, &result, &state,
2548                              struct rpc_pipe_bind_state)) {
2549                 return NULL;
2550         }
2551
2552         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2553                 rpccli_pipe_txt(debug_ctx(), cli),
2554                 (unsigned int)auth->auth_type,
2555                 (unsigned int)auth->auth_level ));
2556
2557         state->ev = ev;
2558         state->cli = cli;
2559         state->rpc_call_id = get_rpc_call_id();
2560
2561         prs_init_empty(&state->rpc_out, state, MARSHALL);
2562         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2563
2564         cli->auth = talloc_move(cli, &auth);
2565
2566         /* Marshall the outgoing data. */
2567         status = create_rpc_bind_req(cli, &state->rpc_out,
2568                                      state->rpc_call_id,
2569                                      &cli->abstract_syntax,
2570                                      &cli->transfer_syntax,
2571                                      cli->auth->auth_type,
2572                                      cli->auth->auth_level);
2573
2574         if (!NT_STATUS_IS_OK(status)) {
2575                 goto post_status;
2576         }
2577
2578         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2579                                    RPC_BINDACK);
2580         if (subreq == NULL) {
2581                 status = NT_STATUS_NO_MEMORY;
2582                 goto post_status;
2583         }
2584         subreq->async.fn = rpc_pipe_bind_step_one_done;
2585         subreq->async.priv = result;
2586         return result;
2587
2588  post_status:
2589         if (async_post_ntstatus(result, ev, status)) {
2590                 return result;
2591         }
2592         TALLOC_FREE(result);
2593         return NULL;
2594 }
2595
2596 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2597 {
2598         struct async_req *req = talloc_get_type_abort(
2599                 subreq->async.priv, struct async_req);
2600         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2601                 req->private_data, struct rpc_pipe_bind_state);
2602         prs_struct reply_pdu;
2603         struct rpc_hdr_info hdr;
2604         struct rpc_hdr_ba_info hdr_ba;
2605         NTSTATUS status;
2606
2607         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2608         TALLOC_FREE(subreq);
2609         if (!NT_STATUS_IS_OK(status)) {
2610                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2611                           rpccli_pipe_txt(debug_ctx(), state->cli),
2612                           nt_errstr(status)));
2613                 async_req_nterror(req, status);
2614                 return;
2615         }
2616
2617         /* Unmarshall the RPC header */
2618         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2619                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2620                 prs_mem_free(&reply_pdu);
2621                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2622                 return;
2623         }
2624
2625         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2626                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2627                           "RPC_HDR_BA.\n"));
2628                 prs_mem_free(&reply_pdu);
2629                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2630                 return;
2631         }
2632
2633         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2634                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2635                 prs_mem_free(&reply_pdu);
2636                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2637                 return;
2638         }
2639
2640         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2641         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2642
2643         /*
2644          * For authenticated binds we may need to do 3 or 4 leg binds.
2645          */
2646
2647         switch(state->cli->auth->auth_type) {
2648
2649         case PIPE_AUTH_TYPE_NONE:
2650         case PIPE_AUTH_TYPE_SCHANNEL:
2651                 /* Bind complete. */
2652                 prs_mem_free(&reply_pdu);
2653                 async_req_done(req);
2654                 break;
2655
2656         case PIPE_AUTH_TYPE_NTLMSSP:
2657                 /* Need to send AUTH3 packet - no reply. */
2658                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2659                                                     &reply_pdu);
2660                 prs_mem_free(&reply_pdu);
2661                 if (!NT_STATUS_IS_OK(status)) {
2662                         async_req_nterror(req, status);
2663                 }
2664                 break;
2665
2666         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2667                 /* Need to send alter context request and reply. */
2668                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2669                                                              &reply_pdu);
2670                 prs_mem_free(&reply_pdu);
2671                 if (!NT_STATUS_IS_OK(status)) {
2672                         async_req_nterror(req, status);
2673                 }
2674                 break;
2675
2676         case PIPE_AUTH_TYPE_KRB5:
2677                 /* Not handled yet - fall through to the error below. */
2678
2679         default:
2680                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2681                          (unsigned int)state->cli->auth->auth_type));
2682                 prs_mem_free(&reply_pdu);
2683                 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2684         }
2685 }
2686
2687 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2688                                            struct rpc_pipe_bind_state *state,
2689                                            struct rpc_hdr_info *phdr,
2690                                            prs_struct *reply_pdu)
2691 {
2692         DATA_BLOB server_response = data_blob_null;
2693         DATA_BLOB client_reply = data_blob_null;
2694         struct rpc_hdr_auth_info hdr_auth;
2695         struct async_req *subreq;
2696         NTSTATUS status;
2697
2698         if ((phdr->auth_len == 0)
2699             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2700                 return NT_STATUS_INVALID_PARAMETER;
2701         }
2702
2703         if (!prs_set_offset(
2704                     reply_pdu,
2705                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2706                 return NT_STATUS_INVALID_PARAMETER;
2707         }
2708
2709         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2710                 return NT_STATUS_INVALID_PARAMETER;
2711         }
2712
2713         /* TODO - check auth_type/auth_level match. */
2714
2715         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2716         prs_copy_data_out((char *)server_response.data, reply_pdu,
2717                           phdr->auth_len);
2718
2719         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2720                                 server_response, &client_reply);
2721
2722         if (!NT_STATUS_IS_OK(status)) {
2723                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2724                           "blob failed: %s.\n", nt_errstr(status)));
2725                 return status;
2726         }
2727
2728         prs_init_empty(&state->rpc_out, state, MARSHALL);
2729
2730         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2731                                        state->cli->auth->auth_type,
2732                                        state->cli->auth->auth_level,
2733                                        &client_reply, &state->rpc_out);
2734         data_blob_free(&client_reply);
2735
2736         if (!NT_STATUS_IS_OK(status)) {
2737                 return status;
2738         }
2739
2740         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2741                                 (uint8_t *)prs_data_p(&state->rpc_out),
2742                                 prs_offset(&state->rpc_out));
2743         if (subreq == NULL) {
2744                 return NT_STATUS_NO_MEMORY;
2745         }
2746         subreq->async.fn = rpc_bind_auth3_write_done;
2747         subreq->async.priv = req;
2748         return NT_STATUS_OK;
2749 }
2750
2751 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2752 {
2753         struct async_req *req = talloc_get_type_abort(
2754                 subreq->async.priv, struct async_req);
2755         NTSTATUS status;
2756
2757         status = rpc_write_recv(subreq);
2758         TALLOC_FREE(subreq);
2759         if (!NT_STATUS_IS_OK(status)) {
2760                 async_req_nterror(req, status);
2761                 return;
2762         }
2763         async_req_done(req);
2764 }
2765
2766 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2767                                                     struct rpc_pipe_bind_state *state,
2768                                                     struct rpc_hdr_info *phdr,
2769                                                     prs_struct *reply_pdu)
2770 {
2771         DATA_BLOB server_spnego_response = data_blob_null;
2772         DATA_BLOB server_ntlm_response = data_blob_null;
2773         DATA_BLOB client_reply = data_blob_null;
2774         DATA_BLOB tmp_blob = data_blob_null;
2775         RPC_HDR_AUTH hdr_auth;
2776         struct async_req *subreq;
2777         NTSTATUS status;
2778
2779         if ((phdr->auth_len == 0)
2780             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2781                 return NT_STATUS_INVALID_PARAMETER;
2782         }
2783
2784         /* Process the returned NTLMSSP blob first. */
2785         if (!prs_set_offset(
2786                     reply_pdu,
2787                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2788                 return NT_STATUS_INVALID_PARAMETER;
2789         }
2790
2791         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2792                 return NT_STATUS_INVALID_PARAMETER;
2793         }
2794
2795         server_spnego_response = data_blob(NULL, phdr->auth_len);
2796         prs_copy_data_out((char *)server_spnego_response.data,
2797                           reply_pdu, phdr->auth_len);
2798
2799         /*
2800          * The server might give us back two challenges - tmp_blob is for the
2801          * second.
2802          */
2803         if (!spnego_parse_challenge(server_spnego_response,
2804                                     &server_ntlm_response, &tmp_blob)) {
2805                 data_blob_free(&server_spnego_response);
2806                 data_blob_free(&server_ntlm_response);
2807                 data_blob_free(&tmp_blob);
2808                 return NT_STATUS_INVALID_PARAMETER;
2809         }
2810
2811         /* We're finished with the server spnego response and the tmp_blob. */
2812         data_blob_free(&server_spnego_response);
2813         data_blob_free(&tmp_blob);
2814
2815         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2816                                 server_ntlm_response, &client_reply);
2817
2818         /* Finished with the server_ntlm response */
2819         data_blob_free(&server_ntlm_response);
2820
2821         if (!NT_STATUS_IS_OK(status)) {
2822                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2823                           "using server blob failed.\n"));
2824                 data_blob_free(&client_reply);
2825                 return status;
2826         }
2827
2828         /* SPNEGO wrap the client reply. */
2829         tmp_blob = spnego_gen_auth(client_reply);
2830         data_blob_free(&client_reply);
2831         client_reply = tmp_blob;
2832         tmp_blob = data_blob_null;
2833
2834         /* Now prepare the alter context pdu. */
2835         prs_init_empty(&state->rpc_out, state, MARSHALL);
2836
2837         status = create_rpc_alter_context(state->rpc_call_id,
2838                                           &state->cli->abstract_syntax,
2839                                           &state->cli->transfer_syntax,
2840                                           state->cli->auth->auth_level,
2841                                           &client_reply,
2842                                           &state->rpc_out);
2843         data_blob_free(&client_reply);
2844
2845         if (!NT_STATUS_IS_OK(status)) {
2846                 return status;
2847         }
2848
2849         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2850                                    &state->rpc_out, RPC_ALTCONTRESP);
2851         if (subreq == NULL) {
2852                 return NT_STATUS_NO_MEMORY;
2853         }
2854         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2855         subreq->async.priv = req;
2856         return NT_STATUS_OK;
2857 }
2858
2859 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2860 {
2861         struct async_req *req = talloc_get_type_abort(
2862                 subreq->async.priv, struct async_req);
2863         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2864                 req->private_data, struct rpc_pipe_bind_state);
2865         DATA_BLOB server_spnego_response = data_blob_null;
2866         DATA_BLOB tmp_blob = data_blob_null;
2867         prs_struct reply_pdu;
2868         struct rpc_hdr_info hdr;
2869         struct rpc_hdr_auth_info hdr_auth;
2870         NTSTATUS status;
2871
2872         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2873         TALLOC_FREE(subreq);
2874         if (!NT_STATUS_IS_OK(status)) {
2875                 async_req_nterror(req, status);
2876                 return;
2877         }
2878
2879         /* Get the auth blob from the reply. */
2880         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2881                 DEBUG(0, ("rpc_bind_ntlmssp_api_done: Failed to "
2882                           "unmarshall RPC_HDR.\n"));
2883                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2884                 return;
2885         }
2886
2887         if (!prs_set_offset(
2888                     &reply_pdu,
2889                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2890                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2891                 return;
2892         }
2893
2894         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2895                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2896                 return;
2897         }
2898
2899         server_spnego_response = data_blob(NULL, hdr.auth_len);
2900         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2901                           hdr.auth_len);
2902
2903         /* Check we got a valid auth response. */
2904         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2905                                         OID_NTLMSSP, &tmp_blob)) {
2906                 data_blob_free(&server_spnego_response);
2907                 data_blob_free(&tmp_blob);
2908                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2909                 return;
2910         }
2911
2912         data_blob_free(&server_spnego_response);
2913         data_blob_free(&tmp_blob);
2914
2915         DEBUG(5,("rpc_bind_ntlmssp_api_done: alter context request to "
2916                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2917         async_req_done(req);
2918 }
2919
2920 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2921 {
2922         return async_req_simple_recv_ntstatus(req);
2923 }
2924
2925 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2926                        struct cli_pipe_auth_data *auth)
2927 {
2928         TALLOC_CTX *frame = talloc_stackframe();
2929         struct event_context *ev;
2930         struct async_req *req;
2931         NTSTATUS status = NT_STATUS_NO_MEMORY;
2932
2933         ev = event_context_init(frame);
2934         if (ev == NULL) {
2935                 goto fail;
2936         }
2937
2938         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2939         if (req == NULL) {
2940                 goto fail;
2941         }
2942
2943         while (req->state < ASYNC_REQ_DONE) {
2944                 event_loop_once(ev);
2945         }
2946
2947         status = rpc_pipe_bind_recv(req);
2948  fail:
2949         TALLOC_FREE(frame);
2950         return status;
2951 }
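
/*
 * Illustrative sketch, not part of the original file: how a caller could
 * drive the asynchronous bind directly instead of using the synchronous
 * rpc_pipe_bind() wrapper above.  The callback wiring via async.fn and
 * async.priv follows the convention used throughout this file; the
 * example_* names are hypothetical.
 */

static void example_bind_done(struct async_req *req);

static struct async_req *example_bind_send(TALLOC_CTX *mem_ctx,
                                           struct event_context *ev,
                                           struct rpc_pipe_client *cli,
                                           struct cli_pipe_auth_data *auth)
{
        struct async_req *req;

        req = rpc_pipe_bind_send(mem_ctx, ev, cli, auth);
        if (req == NULL) {
                return NULL;
        }
        /* Hand the pipe client to the completion callback. */
        req->async.fn = example_bind_done;
        req->async.priv = cli;
        return req;
}

static void example_bind_done(struct async_req *req)
{
        struct rpc_pipe_client *cli = talloc_get_type_abort(
                req->async.priv, struct rpc_pipe_client);
        NTSTATUS status;

        status = rpc_pipe_bind_recv(req);
        TALLOC_FREE(req);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(1, ("example bind to %s failed: %s\n",
                          rpccli_pipe_txt(debug_ctx(), cli),
                          nt_errstr(status)));
                return;
        }
        DEBUG(10, ("example bind to %s completed\n",
                   rpccli_pipe_txt(debug_ctx(), cli)));
}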
2952
2953 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2954                                 unsigned int timeout)
2955 {
2956         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2957
2958         if (cli == NULL) {
2959                 return 0;
2960         }
2961         return cli_set_timeout(cli, timeout);
2962 }
2963
2964 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2965 {
2966         struct cli_state *cli;
2967
2968         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2969             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2970                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2971                 return true;
2972         }
2973
2974         cli = rpc_pipe_np_smb_conn(rpc_cli);
2975         if (cli == NULL) {
2976                 return false;
2977         }
2978         E_md4hash(cli->password ? cli->password : "", nt_hash);
2979         return true;
2980 }
2981
2982 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2983                                struct cli_pipe_auth_data **presult)
2984 {
2985         struct cli_pipe_auth_data *result;
2986
2987         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2988         if (result == NULL) {
2989                 return NT_STATUS_NO_MEMORY;
2990         }
2991
2992         result->auth_type = PIPE_AUTH_TYPE_NONE;
2993         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2994
2995         result->user_name = talloc_strdup(result, "");
2996         result->domain = talloc_strdup(result, "");
2997         if ((result->user_name == NULL) || (result->domain == NULL)) {
2998                 TALLOC_FREE(result);
2999                 return NT_STATUS_NO_MEMORY;
3000         }
3001
3002         *presult = result;
3003         return NT_STATUS_OK;
3004 }
3005
3006 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3007 {
3008         ntlmssp_end(&auth->a_u.ntlmssp_state);
3009         return 0;
3010 }
3011
3012 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3013                                   enum pipe_auth_type auth_type,
3014                                   enum pipe_auth_level auth_level,
3015                                   const char *domain,
3016                                   const char *username,
3017                                   const char *password,
3018                                   struct cli_pipe_auth_data **presult)
3019 {
3020         struct cli_pipe_auth_data *result;
3021         NTSTATUS status;
3022
3023         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3024         if (result == NULL) {
3025                 return NT_STATUS_NO_MEMORY;
3026         }
3027
3028         result->auth_type = auth_type;
3029         result->auth_level = auth_level;
3030
3031         result->user_name = talloc_strdup(result, username);
3032         result->domain = talloc_strdup(result, domain);
3033         if ((result->user_name == NULL) || (result->domain == NULL)) {
3034                 status = NT_STATUS_NO_MEMORY;
3035                 goto fail;
3036         }
3037
3038         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3039         if (!NT_STATUS_IS_OK(status)) {
3040                 goto fail;
3041         }
3042
3043         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3044
3045         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3046         if (!NT_STATUS_IS_OK(status)) {
3047                 goto fail;
3048         }
3049
3050         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3051         if (!NT_STATUS_IS_OK(status)) {
3052                 goto fail;
3053         }
3054
3055         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3056         if (!NT_STATUS_IS_OK(status)) {
3057                 goto fail;
3058         }
3059
3060         /*
3061          * Turn off sign+seal to allow selected auth level to turn it back on.
3062          */
3063         result->a_u.ntlmssp_state->neg_flags &=
3064                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3065
3066         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3067                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3068         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3069                 result->a_u.ntlmssp_state->neg_flags
3070                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3071         }
3072
3073         *presult = result;
3074         return NT_STATUS_OK;
3075
3076  fail:
3077         TALLOC_FREE(result);
3078         return status;
3079 }
3080
3081 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3082                                    enum pipe_auth_level auth_level,
3083                                    const uint8_t sess_key[16],
3084                                    struct cli_pipe_auth_data **presult)
3085 {
3086         struct cli_pipe_auth_data *result;
3087
3088         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3089         if (result == NULL) {
3090                 return NT_STATUS_NO_MEMORY;
3091         }
3092
3093         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3094         result->auth_level = auth_level;
3095
3096         result->user_name = talloc_strdup(result, "");
3097         result->domain = talloc_strdup(result, domain);
3098         if ((result->user_name == NULL) || (result->domain == NULL)) {
3099                 goto fail;
3100         }
3101
3102         result->a_u.schannel_auth = talloc(result,
3103                                            struct schannel_auth_struct);
3104         if (result->a_u.schannel_auth == NULL) {
3105                 goto fail;
3106         }
3107
3108         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3109                sizeof(result->a_u.schannel_auth->sess_key));
3110         result->a_u.schannel_auth->seq_num = 0;
3111
3112         *presult = result;
3113         return NT_STATUS_OK;
3114
3115  fail:
3116         TALLOC_FREE(result);
3117         return NT_STATUS_NO_MEMORY;
3118 }
3119
3120 #ifdef HAVE_KRB5
3121 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3122 {
3123         data_blob_free(&auth->session_key);
3124         return 0;
3125 }
3126 #endif
3127
3128 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3129                                    enum pipe_auth_level auth_level,
3130                                    const char *service_princ,
3131                                    const char *username,
3132                                    const char *password,
3133                                    struct cli_pipe_auth_data **presult)
3134 {
3135 #ifdef HAVE_KRB5
3136         struct cli_pipe_auth_data *result;
3137
3138         if ((username != NULL) && (password != NULL)) {
3139                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3140                 if (ret != 0) {
3141                         return NT_STATUS_ACCESS_DENIED;
3142                 }
3143         }
3144
3145         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3146         if (result == NULL) {
3147                 return NT_STATUS_NO_MEMORY;
3148         }
3149
3150         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3151         result->auth_level = auth_level;
3152
3153         /*
3154          * Username / domain need fixing!
3155          */
3156         result->user_name = talloc_strdup(result, "");
3157         result->domain = talloc_strdup(result, "");
3158         if ((result->user_name == NULL) || (result->domain == NULL)) {
3159                 goto fail;
3160         }
3161
3162         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3163                 result, struct kerberos_auth_struct);
3164         if (result->a_u.kerberos_auth == NULL) {
3165                 goto fail;
3166         }
3167         talloc_set_destructor(result->a_u.kerberos_auth,
3168                               cli_auth_kerberos_data_destructor);
3169
3170         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3171                 result, service_princ);
3172         if (result->a_u.kerberos_auth->service_principal == NULL) {
3173                 goto fail;
3174         }
3175
3176         *presult = result;
3177         return NT_STATUS_OK;
3178
3179  fail:
3180         TALLOC_FREE(result);
3181         return NT_STATUS_NO_MEMORY;
3182 #else
3183         return NT_STATUS_NOT_SUPPORTED;
3184 #endif
3185 }
3186
3187 /**
3188  * Create an rpc pipe client struct, connecting to a tcp port.
3189  */
3190 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3191                                        uint16_t port,
3192                                        const struct ndr_syntax_id *abstract_syntax,
3193                                        struct rpc_pipe_client **presult)
3194 {
3195         struct rpc_pipe_client *result;
3196         struct sockaddr_storage addr;
3197         NTSTATUS status;
3198         int fd;
3199
3200         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3201         if (result == NULL) {
3202                 return NT_STATUS_NO_MEMORY;
3203         }
3204
3205         result->abstract_syntax = *abstract_syntax;
3206         result->transfer_syntax = ndr_transfer_syntax;
3207         result->dispatch = cli_do_rpc_ndr;
3208
3209         result->desthost = talloc_strdup(result, host);
3210         result->srv_name_slash = talloc_asprintf_strupper_m(
3211                 result, "\\\\%s", result->desthost);
3212         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3213                 status = NT_STATUS_NO_MEMORY;
3214                 goto fail;
3215         }
3216
3217         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3218         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3219
3220         if (!resolve_name(host, &addr, 0)) {
3221                 status = NT_STATUS_NOT_FOUND;
3222                 goto fail;
3223         }
3224
3225         status = open_socket_out(&addr, port, 60, &fd);
3226         if (!NT_STATUS_IS_OK(status)) {
3227                 goto fail;
3228         }
3229         set_socket_options(fd, lp_socket_options());
3230
3231         status = rpc_transport_sock_init(result, fd, &result->transport);
3232         if (!NT_STATUS_IS_OK(status)) {
3233                 close(fd);
3234                 goto fail;
3235         }
3236
3237         *presult = result;
3238         return NT_STATUS_OK;
3239
3240  fail:
3241         TALLOC_FREE(result);
3242         return status;
3243 }
3244
3245 /**
3246  * Determine the tcp port on which a dcerpc interface is listening
3247  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3248  * target host.
3249  */
3250 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3251                                       const struct ndr_syntax_id *abstract_syntax,
3252                                       uint16_t *pport)
3253 {
3254         NTSTATUS status;
3255         struct rpc_pipe_client *epm_pipe = NULL;
3256         struct cli_pipe_auth_data *auth = NULL;
3257         struct dcerpc_binding *map_binding = NULL;
3258         struct dcerpc_binding *res_binding = NULL;
3259         struct epm_twr_t *map_tower = NULL;
3260         struct epm_twr_t *res_towers = NULL;
3261         struct policy_handle *entry_handle = NULL;
3262         uint32_t num_towers = 0;
3263         uint32_t max_towers = 1;
3264         struct epm_twr_p_t towers;
3265         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3266
3267         if (pport == NULL) {
3268                 status = NT_STATUS_INVALID_PARAMETER;
3269                 goto done;
3270         }
3271
3272         /* open the connection to the endpoint mapper */
3273         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3274                                         &ndr_table_epmapper.syntax_id,
3275                                         &epm_pipe);
3276
3277         if (!NT_STATUS_IS_OK(status)) {
3278                 goto done;
3279         }
3280
3281         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3282         if (!NT_STATUS_IS_OK(status)) {
3283                 goto done;
3284         }
3285
3286         status = rpc_pipe_bind(epm_pipe, auth);
3287         if (!NT_STATUS_IS_OK(status)) {
3288                 goto done;
3289         }
3290
3291         /* create tower for asking the epmapper */
3292
3293         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3294         if (map_binding == NULL) {
3295                 status = NT_STATUS_NO_MEMORY;
3296                 goto done;
3297         }
3298
3299         map_binding->transport = NCACN_IP_TCP;
3300         map_binding->object = *abstract_syntax;
3301         map_binding->host = host; /* needed? */
3302         map_binding->endpoint = "0"; /* correct? needed? */
3303
3304         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3305         if (map_tower == NULL) {
3306                 status = NT_STATUS_NO_MEMORY;
3307                 goto done;
3308         }
3309
3310         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3311                                             &(map_tower->tower));
3312         if (!NT_STATUS_IS_OK(status)) {
3313                 goto done;
3314         }
3315
3316         /* allocate further parameters for the epm_Map call */
3317
3318         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3319         if (res_towers == NULL) {
3320                 status = NT_STATUS_NO_MEMORY;
3321                 goto done;
3322         }
3323         towers.twr = res_towers;
3324
3325         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3326         if (entry_handle == NULL) {
3327                 status = NT_STATUS_NO_MEMORY;
3328                 goto done;
3329         }
3330
3331         /* ask the endpoint mapper for the port */
3332
3333         status = rpccli_epm_Map(epm_pipe,
3334                                 tmp_ctx,
3335                                 CONST_DISCARD(struct GUID *,
3336                                               &(abstract_syntax->uuid)),
3337                                 map_tower,
3338                                 entry_handle,
3339                                 max_towers,
3340                                 &num_towers,
3341                                 &towers);
3342
3343         if (!NT_STATUS_IS_OK(status)) {
3344                 goto done;
3345         }
3346
3347         if (num_towers != 1) {
3348                 status = NT_STATUS_UNSUCCESSFUL;
3349                 goto done;
3350         }
3351
3352         /* extract the port from the answer */
3353
3354         status = dcerpc_binding_from_tower(tmp_ctx,
3355                                            &(towers.twr->tower),
3356                                            &res_binding);
3357         if (!NT_STATUS_IS_OK(status)) {
3358                 goto done;
3359         }
3360
3361         /* are further checks here necessary? */
3362         if (res_binding->transport != NCACN_IP_TCP) {
3363                 status = NT_STATUS_UNSUCCESSFUL;
3364                 goto done;
3365         }
3366
3367         *pport = (uint16_t)atoi(res_binding->endpoint);
3368
3369 done:
3370         TALLOC_FREE(tmp_ctx);
3371         return status;
3372 }
3373
3374 /**
3375  * Create a rpc pipe client struct, connecting to a host via tcp.
3376  * The port is determined by asking the endpoint mapper on the given
3377  * host.
3378  */
3379 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3380                            const struct ndr_syntax_id *abstract_syntax,
3381                            struct rpc_pipe_client **presult)
3382 {
3383         NTSTATUS status;
3384         uint16_t port = 0;
3385
3386         *presult = NULL;
3387
3388         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3389         if (!NT_STATUS_IS_OK(status)) {
3390                 goto done;
3391         }
3392
3393         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3394                                         abstract_syntax, presult);
3395
3396 done:
3397         return status;
3398 }
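
/*
 * Illustrative sketch, not part of the original file: opening an
 * ncacn_ip_tcp connection via the endpoint mapper and binding it
 * anonymously.  example_open_tcp_anon is a hypothetical helper; real
 * callers would pick the interface and authentication they need.
 */

static NTSTATUS example_open_tcp_anon(TALLOC_CTX *mem_ctx, const char *host,
                                      const struct ndr_syntax_id *interface,
                                      struct rpc_pipe_client **presult)
{
        struct rpc_pipe_client *cli = NULL;
        struct cli_pipe_auth_data *auth = NULL;
        NTSTATUS status;

        /* Asks the endpoint mapper on "host" for the port and connects. */
        status = rpc_pipe_open_tcp(mem_ctx, host, interface, &cli);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        status = rpccli_anon_bind_data(cli, &auth);
        if (NT_STATUS_IS_OK(status)) {
                status = rpc_pipe_bind(cli, auth);
        }
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(cli);
                return status;
        }

        *presult = cli;
        return NT_STATUS_OK;
}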
3399
3400 /********************************************************************
3401  Create a rpc pipe client struct, connecting to a unix domain socket
3402  ********************************************************************/
3403 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3404                                const struct ndr_syntax_id *abstract_syntax,
3405                                struct rpc_pipe_client **presult)
3406 {
3407         struct rpc_pipe_client *result;
3408         struct sockaddr_un addr;
3409         NTSTATUS status;
3410         int fd;
3411
3412         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3413         if (result == NULL) {
3414                 return NT_STATUS_NO_MEMORY;
3415         }
3416
3417         result->abstract_syntax = *abstract_syntax;
3418         result->transfer_syntax = ndr_transfer_syntax;
3419         result->dispatch = cli_do_rpc_ndr;
3420
3421         result->desthost = get_myname(result);
3422         result->srv_name_slash = talloc_asprintf_strupper_m(
3423                 result, "\\\\%s", result->desthost);
3424         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3425                 status = NT_STATUS_NO_MEMORY;
3426                 goto fail;
3427         }
3428
3429         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3430         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3431
3432         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3433         if (fd == -1) {
3434                 status = map_nt_error_from_unix(errno);
3435                 goto fail;
3436         }
3437
3438         ZERO_STRUCT(addr);
3439         addr.sun_family = AF_UNIX;
3440         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1);
3441
3442         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3443                 status = map_nt_error_from_unix(errno);
3444                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path, strerror(errno)));
3445                 close(fd);
3446                 goto fail;
3447         }
3448
3449         status = rpc_transport_sock_init(result, fd, &result->transport);
3450         if (!NT_STATUS_IS_OK(status)) {
3451                 close(fd);
3452                 goto fail;
3453         }
3454
3455         *presult = result;
3456         return NT_STATUS_OK;
3457
3458  fail:
3459         TALLOC_FREE(result);
3460         return status;
3461 }
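
/*
 * Illustrative sketch, not part of the original file: a local ncalrpc
 * connection is opened the same way, only the transport differs.  The
 * socket path below is a made-up example; binding then proceeds exactly
 * as in the TCP sketch above.
 */

static NTSTATUS example_open_ncalrpc(TALLOC_CTX *mem_ctx,
                                     const struct ndr_syntax_id *interface,
                                     struct rpc_pipe_client **presult)
{
        /* Hypothetical path - use whatever the local server listens on. */
        const char *socket_path = "/var/run/example-ncalrpc";

        return rpc_pipe_open_ncalrpc(mem_ctx, socket_path, interface,
                                     presult);
}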
3462
3463 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3464 {
3465         struct cli_state *cli;
3466
3467         cli = rpc_pipe_np_smb_conn(p);
3468         if (cli != NULL) {
3469                 DLIST_REMOVE(cli->pipe_list, p);
3470         }
3471         return 0;
3472 }
3473
3474 /****************************************************************************
3475  Open a named pipe over SMB to a remote server.
3476  *
3477  * CAVEAT CALLER OF THIS FUNCTION:
3478  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3479  *    so be sure that this function is called AFTER any structure (vs pointer)
3480  *    assignment of the cli.  In particular, libsmbclient does structure
3481  *    assignments of cli, which invalidates the data in the returned
3482  *    rpc_pipe_client if this function is called before the structure assignment
3483  *    of cli.
3484  * 
3485  ****************************************************************************/
3486
3487 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3488                                  const struct ndr_syntax_id *abstract_syntax,
3489                                  struct rpc_pipe_client **presult)
3490 {
3491         struct rpc_pipe_client *result;
3492         NTSTATUS status;
3493
3494         /* sanity check to protect against crashes */
3495
3496         if ( !cli ) {
3497                 return NT_STATUS_INVALID_HANDLE;
3498         }
3499
3500         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3501         if (result == NULL) {
3502                 return NT_STATUS_NO_MEMORY;
3503         }
3504
3505         result->abstract_syntax = *abstract_syntax;
3506         result->transfer_syntax = ndr_transfer_syntax;
3507         result->dispatch = cli_do_rpc_ndr;
3508         result->desthost = talloc_strdup(result, cli->desthost);
3509         result->srv_name_slash = talloc_asprintf_strupper_m(
3510                 result, "\\\\%s", result->desthost);
3511
3512         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3513         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3514
3515         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3516                 TALLOC_FREE(result);
3517                 return NT_STATUS_NO_MEMORY;
3518         }
3519
3520         status = rpc_transport_np_init(result, cli, abstract_syntax,
3521                                        &result->transport);
3522         if (!NT_STATUS_IS_OK(status)) {
3523                 TALLOC_FREE(result);
3524                 return status;
3525         }
3526
3527         DLIST_ADD(cli->pipe_list, result);
3528         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3529
3530         *presult = result;
3531         return NT_STATUS_OK;
3532 }
3533
3534 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3535                              struct rpc_cli_smbd_conn *conn,
3536                              const struct ndr_syntax_id *syntax,
3537                              struct rpc_pipe_client **presult)
3538 {
3539         struct rpc_pipe_client *result;
3540         struct cli_pipe_auth_data *auth;
3541         NTSTATUS status;
3542
3543         result = talloc(mem_ctx, struct rpc_pipe_client);
3544         if (result == NULL) {
3545                 return NT_STATUS_NO_MEMORY;
3546         }
3547         result->abstract_syntax = *syntax;
3548         result->transfer_syntax = ndr_transfer_syntax;
3549         result->dispatch = cli_do_rpc_ndr;
3550         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3551         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3552
3553         result->desthost = talloc_strdup(result, global_myname());
3554         result->srv_name_slash = talloc_asprintf_strupper_m(
3555                 result, "\\\\%s", global_myname());
3556         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3557                 TALLOC_FREE(result);
3558                 return NT_STATUS_NO_MEMORY;
3559         }
3560
3561         status = rpc_transport_smbd_init(result, conn, syntax,
3562                                          &result->transport);
3563         if (!NT_STATUS_IS_OK(status)) {
3564                 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3565                           nt_errstr(status)));
3566                 TALLOC_FREE(result);
3567                 return status;
3568         }
3569
3570         status = rpccli_anon_bind_data(result, &auth);
3571         if (!NT_STATUS_IS_OK(status)) {
3572                 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3573                           nt_errstr(status)));
3574                 TALLOC_FREE(result);
3575                 return status;
3576         }
3577
3578         status = rpc_pipe_bind(result, auth);
3579         if (!NT_STATUS_IS_OK(status)) {
3580                 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3581                 TALLOC_FREE(result);
3582                 return status;
3583         }
3584
3585         *presult = result;
3586         return NT_STATUS_OK;
3587 }
3588
3589 /****************************************************************************
3590  Open a pipe to a remote server.
3591  ****************************************************************************/
3592
3593 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3594                                   const struct ndr_syntax_id *interface,
3595                                   struct rpc_pipe_client **presult)
3596 {
3597         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3598                 /*
3599                  * We should have a better way to figure out this drsuapi
3600                  * speciality...
3601                  */
3602                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3603                                          presult);
3604         }
3605
3606         return rpc_pipe_open_np(cli, interface, presult);
3607 }
3608
3609 /****************************************************************************
3610  Open a named pipe to an SMB server and bind anonymously.
3611  ****************************************************************************/
3612
3613 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3614                                   const struct ndr_syntax_id *interface,
3615                                   struct rpc_pipe_client **presult)
3616 {
3617         struct rpc_pipe_client *result;
3618         struct cli_pipe_auth_data *auth;
3619         NTSTATUS status;
3620
3621         status = cli_rpc_pipe_open(cli, interface, &result);
3622         if (!NT_STATUS_IS_OK(status)) {
3623                 return status;
3624         }
3625
3626         status = rpccli_anon_bind_data(result, &auth);
3627         if (!NT_STATUS_IS_OK(status)) {
3628                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3629                           nt_errstr(status)));
3630                 TALLOC_FREE(result);
3631                 return status;
3632         }
3633
3634         /*
3635          * This is a bit of an abstraction violation: an anonymous bind on
3636          * an authenticated SMB connection inherits the user/domain from
3637          * the enclosing SMB credentials.
3638          */
3639
3640         TALLOC_FREE(auth->user_name);
3641         TALLOC_FREE(auth->domain);
3642
3643         auth->user_name = talloc_strdup(auth, cli->user_name);
3644         auth->domain = talloc_strdup(auth, cli->domain);
3645         auth->user_session_key = data_blob_talloc(auth,
3646                 cli->user_session_key.data,
3647                 cli->user_session_key.length);
3648
3649         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3650                 TALLOC_FREE(result);
3651                 return NT_STATUS_NO_MEMORY;
3652         }
3653
3654         status = rpc_pipe_bind(result, auth);
3655         if (!NT_STATUS_IS_OK(status)) {
3656                 int lvl = 0;
3657                 if (ndr_syntax_id_equal(interface,
3658                                         &ndr_table_dssetup.syntax_id)) {
3659                         /* Non-AD domains just don't have this pipe; avoid
3660                          * a level 0 debug message in that case - gd */
3661                         lvl = 3;
3662                 }
3663                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3664                             "%s failed with error %s\n",
3665                             get_pipe_name_from_iface(interface),
3666                             nt_errstr(status) ));
3667                 TALLOC_FREE(result);
3668                 return status;
3669         }
3670
3671         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3672                   "%s and bound anonymously.\n",
3673                   get_pipe_name_from_iface(interface), cli->desthost));
3674
3675         *presult = result;
3676         return NT_STATUS_OK;
3677 }
3678
3679 /****************************************************************************
3680  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3681  ****************************************************************************/
3682
3683 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3684                                                    const struct ndr_syntax_id *interface,
3685                                                    enum pipe_auth_type auth_type,
3686                                                    enum pipe_auth_level auth_level,
3687                                                    const char *domain,
3688                                                    const char *username,
3689                                                    const char *password,
3690                                                    struct rpc_pipe_client **presult)
3691 {
3692         struct rpc_pipe_client *result;
3693         struct cli_pipe_auth_data *auth;
3694         NTSTATUS status;
3695
3696         status = cli_rpc_pipe_open(cli, interface, &result);
3697         if (!NT_STATUS_IS_OK(status)) {
3698                 return status;
3699         }
3700
3701         status = rpccli_ntlmssp_bind_data(
3702                 result, auth_type, auth_level, domain, username,
3703                 password, &auth);
3704         if (!NT_STATUS_IS_OK(status)) {
3705                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3706                           nt_errstr(status)));
3707                 goto err;
3708         }
3709
3710         status = rpc_pipe_bind(result, auth);
3711         if (!NT_STATUS_IS_OK(status)) {
3712                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: rpc_pipe_bind "
3713                           "failed with error %s\n", nt_errstr(status)));
3714                 goto err;
3715         }
3716
3717         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3718                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3719                   get_pipe_name_from_iface(interface), cli->desthost, domain,
3720                   username ));
3721
3722         *presult = result;
3723         return NT_STATUS_OK;
3724
3725   err:
3726
3727         TALLOC_FREE(result);
3728         return status;
3729 }
3730
3731 /****************************************************************************
3732  External interface.
3733  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3734  ****************************************************************************/
3735
3736 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3737                                    const struct ndr_syntax_id *interface,
3738                                    enum pipe_auth_level auth_level,
3739                                    const char *domain,
3740                                    const char *username,
3741                                    const char *password,
3742                                    struct rpc_pipe_client **presult)
3743 {
3744         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3745                                                 interface,
3746                                                 PIPE_AUTH_TYPE_NTLMSSP,
3747                                                 auth_level,
3748                                                 domain,
3749                                                 username,
3750                                                 password,
3751                                                 presult);
3752 }
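
/*
 * Illustrative sketch, not part of the original file: opening a pipe with
 * NTLMSSP sign+seal over an already connected cli_state.  The interface
 * and credentials are assumed to be supplied by the caller.
 */

static NTSTATUS example_open_ntlmssp_sealed(struct cli_state *cli,
                                            const struct ndr_syntax_id *interface,
                                            const char *domain,
                                            const char *username,
                                            const char *password,
                                            struct rpc_pipe_client **presult)
{
        /* PIPE_AUTH_LEVEL_PRIVACY requests both signing and sealing. */
        return cli_rpc_pipe_open_ntlmssp(cli, interface,
                                         PIPE_AUTH_LEVEL_PRIVACY,
                                         domain, username, password,
                                         presult);
}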
3753
3754 /****************************************************************************
3755  External interface.
3756  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3757  ****************************************************************************/
3758
3759 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3760                                           const struct ndr_syntax_id *interface,
3761                                           enum pipe_auth_level auth_level,
3762                                           const char *domain,
3763                                           const char *username,
3764                                           const char *password,
3765                                           struct rpc_pipe_client **presult)
3766 {
3767         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3768                                                 interface,
3769                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3770                                                 auth_level,
3771                                                 domain,
3772                                                 username,
3773                                                 password,
3774                                                 presult);
3775 }
3776
3777 /****************************************************************************
3778  Get the schannel session key out of an already opened netlogon pipe.
3779  ****************************************************************************/
3780 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3781                                                 struct cli_state *cli,
3782                                                 const char *domain,
3783                                                 uint32 *pneg_flags)
3784 {
3785         uint32 sec_chan_type = 0;
3786         unsigned char machine_pwd[16];
3787         const char *machine_account;
3788         NTSTATUS status;
3789
3790         /* Get the machine account credentials from secrets.tdb. */
3791         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3792                                &sec_chan_type))
3793         {
3794                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3795                         "trust account password for domain '%s'\n",
3796                         domain));
3797                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3798         }
3799
3800         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3801                                         cli->desthost, /* server name */
3802                                         domain,        /* domain */
3803                                         global_myname(), /* client name */
3804                                         machine_account, /* machine account name */
3805                                         machine_pwd,
3806                                         sec_chan_type,
3807                                         pneg_flags);
3808
3809         if (!NT_STATUS_IS_OK(status)) {
3810                 DEBUG(3, ("get_schannel_session_key_common: "
3811                           "rpccli_netlogon_setup_creds failed with result %s "
3812                           "to server %s, domain %s, machine account %s.\n",
3813                           nt_errstr(status), cli->desthost, domain,
3814                           machine_account ));
3815                 return status;
3816         }
3817
3818         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3819                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3820                         cli->desthost));
3821                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3822         }
3823
3824         return NT_STATUS_OK;
3825 }
3826
3827 /****************************************************************************
3828  Open a netlogon pipe and get the schannel session key.
3829  Now exposed to external callers.
3830  ****************************************************************************/
3831
3832
3833 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3834                                   const char *domain,
3835                                   uint32 *pneg_flags,
3836                                   struct rpc_pipe_client **presult)
3837 {
3838         struct rpc_pipe_client *netlogon_pipe = NULL;
3839         NTSTATUS status;
3840
3841         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3842                                           &netlogon_pipe);
3843         if (!NT_STATUS_IS_OK(status)) {
3844                 return status;
3845         }
3846
3847         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3848                                                  pneg_flags);
3849         if (!NT_STATUS_IS_OK(status)) {
3850                 TALLOC_FREE(netlogon_pipe);
3851                 return status;
3852         }
3853
3854         *presult = netlogon_pipe;
3855         return NT_STATUS_OK;
3856 }
3857
3858 /****************************************************************************
3859  External interface.
3860  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3861  using the given session key, with sign and seal.
3862  ****************************************************************************/
3863
3864 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3865                                              const struct ndr_syntax_id *interface,
3866                                              enum pipe_auth_level auth_level,
3867                                              const char *domain,
3868                                              const struct dcinfo *pdc,
3869                                              struct rpc_pipe_client **presult)
3870 {
3871         struct rpc_pipe_client *result;
3872         struct cli_pipe_auth_data *auth;
3873         NTSTATUS status;
3874
3875         status = cli_rpc_pipe_open(cli, interface, &result);
3876         if (!NT_STATUS_IS_OK(status)) {
3877                 return status;
3878         }
3879
3880         status = rpccli_schannel_bind_data(result, domain, auth_level,
3881                                            pdc->sess_key, &auth);
3882         if (!NT_STATUS_IS_OK(status)) {
3883                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3884                           nt_errstr(status)));
3885                 TALLOC_FREE(result);
3886                 return status;
3887         }
3888
3889         status = rpc_pipe_bind(result, auth);
3890         if (!NT_STATUS_IS_OK(status)) {
3891                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3892                           "cli_rpc_pipe_bind failed with error %s\n",
3893                           nt_errstr(status) ));
3894                 TALLOC_FREE(result);
3895                 return status;
3896         }
3897
3898         /*
3899          * The credentials on a new netlogon pipe are the ones we are passed
3900          * in - copy them over.
3901          */
3902         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3903         if (result->dc == NULL) {
3904                 DEBUG(0, ("talloc failed\n"));
3905                 TALLOC_FREE(result);
3906                 return NT_STATUS_NO_MEMORY;
3907         }
3908
3909         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3910                   "for domain %s and bound using schannel.\n",
3911                   get_pipe_name_from_iface(interface),
3912                   cli->desthost, domain ));
3913
3914         *presult = result;
3915         return NT_STATUS_OK;
3916 }
3917
3918 /****************************************************************************
3919  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3920  Fetch the session key ourselves using a temporary netlogon pipe. This
3921  version uses an ntlmssp auth bound netlogon pipe to get the key.
3922  ****************************************************************************/
3923
3924 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3925                                                       const char *domain,
3926                                                       const char *username,
3927                                                       const char *password,
3928                                                       uint32 *pneg_flags,
3929                                                       struct rpc_pipe_client **presult)
3930 {
3931         struct rpc_pipe_client *netlogon_pipe = NULL;
3932         NTSTATUS status;
3933
3934         status = cli_rpc_pipe_open_spnego_ntlmssp(
3935                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3936                 domain, username, password, &netlogon_pipe);
3937         if (!NT_STATUS_IS_OK(status)) {
3938                 return status;
3939         }
3940
3941         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3942                                                  pneg_flags);
3943         if (!NT_STATUS_IS_OK(status)) {
3944                 TALLOC_FREE(netlogon_pipe);
3945                 return status;
3946         }
3947
3948         *presult = netlogon_pipe;
3949         return NT_STATUS_OK;
3950 }
3951
3952 /****************************************************************************
3953  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3954  Fetch the session key ourselves using a temporary netlogon pipe. This version
3955  uses an ntlmssp bind to get the session key.
3956  ****************************************************************************/
3957
3958 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3959                                                  const struct ndr_syntax_id *interface,
3960                                                  enum pipe_auth_level auth_level,
3961                                                  const char *domain,
3962                                                  const char *username,
3963                                                  const char *password,
3964                                                  struct rpc_pipe_client **presult)
3965 {
3966         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3967         struct rpc_pipe_client *netlogon_pipe = NULL;
3968         struct rpc_pipe_client *result = NULL;
3969         NTSTATUS status;
3970
3971         status = get_schannel_session_key_auth_ntlmssp(
3972                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3973         if (!NT_STATUS_IS_OK(status)) {
3974                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3975                         "key from server %s for domain %s.\n",
3976                         cli->desthost, domain ));
3977                 return status;
3978         }
3979
3980         status = cli_rpc_pipe_open_schannel_with_key(
3981                 cli, interface, auth_level, domain, netlogon_pipe->dc,
3982                 &result);
3983
3984         /* Now that we've bound using the session key we can close the netlogon pipe. */
3985         TALLOC_FREE(netlogon_pipe);
3986
3987         if (NT_STATUS_IS_OK(status)) {
3988                 *presult = result;
3989         }
3990         return status;
3991 }
3992
3993 /****************************************************************************
3994  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3995  Fetch the session key ourselves using a temporary netlogon pipe.
3996  ****************************************************************************/
3997
3998 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
3999                                     const struct ndr_syntax_id *interface,
4000                                     enum pipe_auth_level auth_level,
4001                                     const char *domain,
4002                                     struct rpc_pipe_client **presult)
4003 {
4004         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4005         struct rpc_pipe_client *netlogon_pipe = NULL;
4006         struct rpc_pipe_client *result = NULL;
4007         NTSTATUS status;
4008
4009         status = get_schannel_session_key(cli, domain, &neg_flags,
4010                                           &netlogon_pipe);
4011         if (!NT_STATUS_IS_OK(status)) {
4012                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4013                         "key from server %s for domain %s.\n",
4014                         cli->desthost, domain ));
4015                 return status;
4016         }
4017
4018         status = cli_rpc_pipe_open_schannel_with_key(
4019                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4020                 &result);
4021
4022         /* Now that we've bound using the session key we can close the netlogon pipe. */
4023         TALLOC_FREE(netlogon_pipe);
4024
4025         if (NT_STATUS_IS_OK(status)) {
4026                 *presult = result;
4027         }
4028
4029         return status;
4030 }
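
/*
 * Example (sketch only, kept out of the build): open the NETLOGON pipe with
 * a schannel bind, letting the routine above fetch the session key itself
 * through a temporary netlogon pipe. The connected cli_state and error
 * handling are the caller's responsibility; the helper name and the use of
 * PIPE_AUTH_LEVEL_PRIVACY here are illustrative assumptions.
 */
#if 0
static NTSTATUS example_open_netlogon_schannel(struct cli_state *cli,
                                               const char *domain,
                                               struct rpc_pipe_client **netlogon)
{
        return cli_rpc_pipe_open_schannel(
                cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
                domain, netlogon);
}
#endif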
4031
4032 /****************************************************************************
4033  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4034  The idea is that this can be called with service_princ, username and password
4035  all set to NULL, so long as the caller already has a TGT.
4036  ****************************************************************************/
4037
4038 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4039                                 const struct ndr_syntax_id *interface,
4040                                 enum pipe_auth_level auth_level,
4041                                 const char *service_princ,
4042                                 const char *username,
4043                                 const char *password,
4044                                 struct rpc_pipe_client **presult)
4045 {
4046 #ifdef HAVE_KRB5
4047         struct rpc_pipe_client *result;
4048         struct cli_pipe_auth_data *auth;
4049         NTSTATUS status;
4050
4051         status = cli_rpc_pipe_open(cli, interface, &result);
4052         if (!NT_STATUS_IS_OK(status)) {
4053                 return status;
4054         }
4055
4056         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4057                                            username, password, &auth);
4058         if (!NT_STATUS_IS_OK(status)) {
4059                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4060                           nt_errstr(status)));
4061                 TALLOC_FREE(result);
4062                 return status;
4063         }
4064
4065         status = rpc_pipe_bind(result, auth);
4066         if (!NT_STATUS_IS_OK(status)) {
4067                 DEBUG(0, ("cli_rpc_pipe_open_krb5: rpc_pipe_bind failed "
4068                           "with error %s\n", nt_errstr(status)));
4069                 TALLOC_FREE(result);
4070                 return status;
4071         }
4072
4073         *presult = result;
4074         return NT_STATUS_OK;
4075 #else
4076         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4077         return NT_STATUS_NOT_IMPLEMENTED;
4078 #endif
4079 }
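
/*
 * Example (sketch only, kept out of the build): bind the SAMR pipe with
 * Kerberos integrity protection, relying on an existing TGT in the default
 * credential cache, so service principal, username and password can all be
 * passed as NULL as described above. The helper name is illustrative and
 * PIPE_AUTH_LEVEL_INTEGRITY is one of the pipe_auth_level values.
 */
#if 0
static NTSTATUS example_open_samr_krb5(struct cli_state *cli,
                                       struct rpc_pipe_client **samr_pipe)
{
        return cli_rpc_pipe_open_krb5(
                cli, &ndr_table_samr.syntax_id, PIPE_AUTH_LEVEL_INTEGRITY,
                NULL, NULL, NULL, samr_pipe);
}
#endif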
4080
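/****************************************************************************
 Return a copy of the session key negotiated on a bound pipe, allocated on
 the supplied talloc context. Fails with NT_STATUS_NO_USER_SESSION_KEY if
 the pipe's auth type does not carry a usable key.
 ****************************************************************************/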
4081 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4082                              struct rpc_pipe_client *cli,
4083                              DATA_BLOB *session_key)
4084 {
4085         if (!session_key || !cli) {
4086                 return NT_STATUS_INVALID_PARAMETER;
4087         }
4088
4089         if (!cli->auth) {
4090                 return NT_STATUS_INVALID_PARAMETER;
4091         }
4092
4093         switch (cli->auth->auth_type) {
4094                 case PIPE_AUTH_TYPE_SCHANNEL:
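                        /* The schannel session key is a fixed 16 bytes. */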
4095                         *session_key = data_blob_talloc(mem_ctx,
4096                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4097                         break;
4098                 case PIPE_AUTH_TYPE_NTLMSSP:
4099                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4100                         *session_key = data_blob_talloc(mem_ctx,
4101                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4102                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4103                         break;
4104                 case PIPE_AUTH_TYPE_KRB5:
4105                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4106                         *session_key = data_blob_talloc(mem_ctx,
4107                                 cli->auth->a_u.kerberos_auth->session_key.data,
4108                                 cli->auth->a_u.kerberos_auth->session_key.length);
4109                         break;
4110                 case PIPE_AUTH_TYPE_NONE:
4111                         *session_key = data_blob_talloc(mem_ctx,
4112                                 cli->auth->user_session_key.data,
4113                                 cli->auth->user_session_key.length);
4114                         break;
4115                 default:
4116                         return NT_STATUS_NO_USER_SESSION_KEY;
4117         }
4118
4119         return NT_STATUS_OK;
4120 }
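
/*
 * Example (sketch only, kept out of the build): copy the session key of a
 * bound pipe onto a talloc stackframe and dump it at debug level 10. The
 * key memory is owned by the supplied talloc context, so freeing the frame
 * releases it. The helper name is made up for illustration.
 */
#if 0
static NTSTATUS example_dump_session_key(struct rpc_pipe_client *rpccli)
{
        TALLOC_CTX *frame = talloc_stackframe();
        DATA_BLOB key;
        NTSTATUS status;

        status = cli_get_session_key(frame, rpccli, &key);
        if (NT_STATUS_IS_OK(status)) {
                dump_data(10, key.data, key.length);
        }

        TALLOC_FREE(frame);
        return status;
}
#endif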