d4abe3c4fd1424fc0956dd50c0f1dcad9bdcca30
[ira/wip.git] / source3 / rpc_client / cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
/* Client-side pipe endpoint names as they appear on the wire ("\PIPE\<name>"). */
#define PIPE_SRVSVC   "\\PIPE\\srvsvc"
#define PIPE_SAMR     "\\PIPE\\samr"
#define PIPE_WINREG   "\\PIPE\\winreg"
#define PIPE_WKSSVC   "\\PIPE\\wkssvc"
#define PIPE_NETLOGON "\\PIPE\\NETLOGON"
#define PIPE_NTLSA    "\\PIPE\\ntlsa"
#define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
#define PIPE_LSASS    "\\PIPE\\lsass"
#define PIPE_LSARPC   "\\PIPE\\lsarpc"
#define PIPE_SPOOLSS  "\\PIPE\\spoolss"
#define PIPE_NETDFS   "\\PIPE\\netdfs"
#define PIPE_ECHO     "\\PIPE\\rpcecho"
#define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
#define PIPE_EPM      "\\PIPE\\epmapper"
#define PIPE_SVCCTL   "\\PIPE\\svcctl"
#define PIPE_EVENTLOG "\\PIPE\\eventlog"
#define PIPE_EPMAPPER "\\PIPE\\epmapper" /* NOTE(review): duplicate of PIPE_EPM above — candidates for consolidation */
#define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
/*
 * Table mapping pipe names to the abstract (interface) syntax ids of the
 * RPC interfaces reachable over them. Terminated by a { NULL, NULL } entry
 * and searched linearly by get_pipe_name_from_iface() below. Note that one
 * pipe name may carry several interfaces (e.g. lsarpc serves both lsarpc
 * and dssetup).
 */
static const struct pipe_id_info {
        /* the names appear not to matter: the syntaxes _do_ matter */

        const char *client_pipe;       /* on-the-wire pipe name, "\PIPE\<name>" */
        const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
} pipe_names [] =
{
        { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
        { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
        { PIPE_SAMR,            &ndr_table_samr.syntax_id },
        { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
        { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
        { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
        { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
        { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
        { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
        { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
        { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
        { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
        { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
        { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
        { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
        { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
        { NULL, NULL }
};
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
85 {
86         char *guid_str;
87         const char *result;
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
102         if (guid_str == NULL) {
103                 return NULL;
104         }
105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106                                  (int)interface->if_version);
107         TALLOC_FREE(guid_str);
108
109         if (result == NULL) {
110                 return "PIPE";
111         }
112         return result;
113 }
114
115 /********************************************************************
116  Map internal value to wire value.
117  ********************************************************************/
118
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
120 {
121         switch (auth_type) {
122
123         case PIPE_AUTH_TYPE_NONE:
124                 return RPC_ANONYMOUS_AUTH_TYPE;
125
126         case PIPE_AUTH_TYPE_NTLMSSP:
127                 return RPC_NTLMSSP_AUTH_TYPE;
128
129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131                 return RPC_SPNEGO_AUTH_TYPE;
132
133         case PIPE_AUTH_TYPE_SCHANNEL:
134                 return RPC_SCHANNEL_AUTH_TYPE;
135
136         case PIPE_AUTH_TYPE_KRB5:
137                 return RPC_KRB5_AUTH_TYPE;
138
139         default:
140                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
141                         "auth type %u\n",
142                         (unsigned int)auth_type ));
143                 break;
144         }
145         return -1;
146 }
147
148 /********************************************************************
149  Pipe description for a DEBUG
150  ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152                                    struct rpc_pipe_client *cli)
153 {
154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155         if (result == NULL) {
156                 return "pipe";
157         }
158         return result;
159 }
160
161 /********************************************************************
162  Rpc pipe call id.
163  ********************************************************************/
164
165 static uint32 get_rpc_call_id(void)
166 {
167         static uint32 call_id = 0;
168         return ++call_id;
169 }
170
171 /*
172  * Realloc pdu to have a least "size" bytes
173  */
174
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
176 {
177         size_t extra_size;
178
179         if (prs_data_size(pdu) >= size) {
180                 return true;
181         }
182
183         extra_size = size - prs_data_size(pdu);
184
185         if (!prs_force_grow(pdu, extra_size)) {
186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187                           "%d bytes.\n", (int)extra_size));
188                 return false;
189         }
190
191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192                   (int)extra_size, prs_data_size(pdu)));
193         return true;
194 }
195
196
197 /*******************************************************************
198  Use SMBreadX to get rest of one fragment's worth of rpc data.
199  Reads the whole size or give an error message
200  ********************************************************************/
201
/* Async state for rpc_read_send/rpc_read_recv: read exactly "size" bytes
 * into "data", retrying partial transport reads until complete. */
struct rpc_read_state {
	struct event_context *ev;		/* event context driving the reads */
	struct rpc_cli_transport *transport;	/* transport supplying read_send/read_recv */
	uint8_t *data;				/* destination buffer */
	size_t size;				/* total bytes requested */
	size_t num_read;			/* bytes received so far */
};
209
210 static void rpc_read_done(struct tevent_req *subreq);
211
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213                                         struct event_context *ev,
214                                         struct rpc_cli_transport *transport,
215                                         uint8_t *data, size_t size)
216 {
217         struct tevent_req *req, *subreq;
218         struct rpc_read_state *state;
219
220         req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
221         if (req == NULL) {
222                 return NULL;
223         }
224         state->ev = ev;
225         state->transport = transport;
226         state->data = data;
227         state->size = size;
228         state->num_read = 0;
229
230         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
231
232         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
233                                       transport->priv);
234         if (subreq == NULL) {
235                 goto fail;
236         }
237         tevent_req_set_callback(subreq, rpc_read_done, req);
238         return req;
239
240  fail:
241         TALLOC_FREE(req);
242         return NULL;
243 }
244
245 static void rpc_read_done(struct tevent_req *subreq)
246 {
247         struct tevent_req *req = tevent_req_callback_data(
248                 subreq, struct tevent_req);
249         struct rpc_read_state *state = tevent_req_data(
250                 req, struct rpc_read_state);
251         NTSTATUS status;
252         ssize_t received;
253
254         status = state->transport->read_recv(subreq, &received);
255         TALLOC_FREE(subreq);
256         if (!NT_STATUS_IS_OK(status)) {
257                 tevent_req_nterror(req, status);
258                 return;
259         }
260
261         state->num_read += received;
262         if (state->num_read == state->size) {
263                 tevent_req_done(req);
264                 return;
265         }
266
267         subreq = state->transport->read_send(state, state->ev,
268                                              state->data + state->num_read,
269                                              state->size - state->num_read,
270                                              state->transport->priv);
271         if (tevent_req_nomem(subreq, req)) {
272                 return;
273         }
274         tevent_req_set_callback(subreq, rpc_read_done, req);
275 }
276
/* Receive half of rpc_read_send. On NT_STATUS_OK the full "size" bytes
 * requested in the send call have been read into the caller's buffer. */
static NTSTATUS rpc_read_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
281
/* Async state for rpc_write_send/rpc_write_recv: write exactly "size"
 * bytes from "data", retrying partial transport writes until complete. */
struct rpc_write_state {
	struct event_context *ev;		/* event context driving the writes */
	struct rpc_cli_transport *transport;	/* transport supplying write_send/write_recv */
	const uint8_t *data;			/* source buffer */
	size_t size;				/* total bytes to write */
	size_t num_written;			/* bytes written so far */
};
289
290 static void rpc_write_done(struct async_req *subreq);
291
292 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
293                                          struct event_context *ev,
294                                          struct rpc_cli_transport *transport,
295                                          const uint8_t *data, size_t size)
296 {
297         struct tevent_req *req;
298         struct async_req *subreq;
299         struct rpc_write_state *state;
300
301         req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
302         if (req == NULL) {
303                 return NULL;
304         }
305         state->ev = ev;
306         state->transport = transport;
307         state->data = data;
308         state->size = size;
309         state->num_written = 0;
310
311         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
312
313         subreq = transport->write_send(state, ev, data, size, transport->priv);
314         if (subreq == NULL) {
315                 goto fail;
316         }
317         subreq->async.fn = rpc_write_done;
318         subreq->async.priv = req;
319         return req;
320  fail:
321         TALLOC_FREE(req);
322         return NULL;
323 }
324
325 static void rpc_write_done(struct async_req *subreq)
326 {
327         struct tevent_req *req = talloc_get_type_abort(
328                 subreq->async.priv, struct tevent_req);
329         struct rpc_write_state *state = tevent_req_data(
330                 req, struct rpc_write_state);
331         NTSTATUS status;
332         ssize_t written;
333
334         status = state->transport->write_recv(subreq, &written);
335         TALLOC_FREE(subreq);
336         if (!NT_STATUS_IS_OK(status)) {
337                 tevent_req_nterror(req, status);
338                 return;
339         }
340
341         state->num_written += written;
342
343         if (state->num_written == state->size) {
344                 tevent_req_done(req);
345                 return;
346         }
347
348         subreq = state->transport->write_send(state, state->ev,
349                                               state->data + state->num_written,
350                                               state->size - state->num_written,
351                                               state->transport->priv);
352         if (tevent_req_nomem(subreq, req)) {
353                 return;
354         }
355         subreq->async.fn = rpc_write_done;
356         subreq->async.priv = req;
357 }
358
/* Receive half of rpc_write_send. On NT_STATUS_OK the full "size" bytes
 * passed to the send call have been written to the transport. */
static NTSTATUS rpc_write_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
363
364
365 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
366                                  struct rpc_hdr_info *prhdr,
367                                  prs_struct *pdu)
368 {
369         /*
370          * This next call sets the endian bit correctly in current_pdu. We
371          * will propagate this to rbuf later.
372          */
373
374         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
375                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
376                 return NT_STATUS_BUFFER_TOO_SMALL;
377         }
378
379         if (prhdr->frag_len > cli->max_recv_frag) {
380                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
381                           " we only allow %d\n", (int)prhdr->frag_len,
382                           (int)cli->max_recv_frag));
383                 return NT_STATUS_BUFFER_TOO_SMALL;
384         }
385
386         return NT_STATUS_OK;
387 }
388
389 /****************************************************************************
390  Try and get a PDU's worth of data from current_pdu. If not, then read more
391  from the wire.
392  ****************************************************************************/
393
/* Async state for get_complete_frag_send/recv: fill "pdu" with one
 * complete RPC fragment, parsing its header into "prhdr" on the way. */
struct get_complete_frag_state {
	struct event_context *ev;	/* event context driving the reads */
	struct rpc_pipe_client *cli;	/* pipe whose transport we read from */
	struct rpc_hdr_info *prhdr;	/* parsed RPC header (caller-provided storage) */
	prs_struct *pdu;		/* buffer accumulating the fragment */
};
400
401 static void get_complete_frag_got_header(struct tevent_req *subreq);
402 static void get_complete_frag_got_rest(struct tevent_req *subreq);
403
/*
 * Async send function: ensure "pdu" holds one complete RPC fragment.
 * Three cases, depending on how much data "pdu" already contains:
 *   1. less than RPC_HEADER_LEN: read the rest of the header first
 *      (continues in get_complete_frag_got_header);
 *   2. header present but fewer than frag_len bytes: read the remainder
 *      (continues in get_complete_frag_got_rest);
 *   3. fragment already complete: finish immediately.
 * Synchronous outcomes are posted via tevent_req_post so the caller's
 * callback always runs from the event loop.
 */
static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
						 struct event_context *ev,
						 struct rpc_pipe_client *cli,
						 struct rpc_hdr_info *prhdr,
						 prs_struct *pdu)
{
	struct tevent_req *req, *subreq;
	struct get_complete_frag_state *state;
	uint32_t pdu_len;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state,
				struct get_complete_frag_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->prhdr = prhdr;
	state->pdu = pdu;

	pdu_len = prs_data_size(pdu);
	if (pdu_len < RPC_HEADER_LEN) {
		/* Case 1: not even a full RPC header yet. */
		if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq = rpc_read_send(
			state, state->ev,
			state->cli->transport,
			(uint8_t *)(prs_data_p(state->pdu) + pdu_len),
			RPC_HEADER_LEN - pdu_len);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, get_complete_frag_got_header,
					req);
		return req;
	}

	status = parse_rpc_header(cli, prhdr, pdu);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	/*
	 * Ensure we have frag_len bytes of data.
	 */
	if (pdu_len < prhdr->frag_len) {
		/* Case 2: header parsed, body incomplete. */
		if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq = rpc_read_send(state, state->ev,
				       state->cli->transport,
				       (uint8_t *)(prs_data_p(pdu) + pdu_len),
				       prhdr->frag_len - pdu_len);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, get_complete_frag_got_rest,
					req);
		return req;
	}

	/* Case 3: the fragment is already complete. */
	status = NT_STATUS_OK;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
}
480
/*
 * Continuation for get_complete_frag_send case 1: the RPC header bytes
 * have arrived. Parse the header, grow the buffer to frag_len and read
 * the rest of the fragment body.
 */
static void get_complete_frag_got_header(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct get_complete_frag_state *state = tevent_req_data(
		req, struct get_complete_frag_state);
	NTSTATUS status;

	status = rpc_read_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return;
	}

	/*
	 * We're here in this piece of code because we've read exactly
	 * RPC_HEADER_LEN bytes into state->pdu.
	 */

	subreq = rpc_read_send(
		state, state->ev, state->cli->transport,
		(uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
		state->prhdr->frag_len - RPC_HEADER_LEN);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
}
521
522 static void get_complete_frag_got_rest(struct tevent_req *subreq)
523 {
524         struct tevent_req *req = tevent_req_callback_data(
525                 subreq, struct tevent_req);
526         NTSTATUS status;
527
528         status = rpc_read_recv(subreq);
529         TALLOC_FREE(subreq);
530         if (!NT_STATUS_IS_OK(status)) {
531                 tevent_req_nterror(req, status);
532                 return;
533         }
534         tevent_req_done(req);
535 }
536
/* Receive half of get_complete_frag_send. On NT_STATUS_OK the pdu holds
 * one complete fragment and the header has been parsed into prhdr. */
static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
541
542 /****************************************************************************
543  NTLMSSP specific sign/seal.
544  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
545  In fact I should probably abstract these into identical pieces of code... JRA.
546  ****************************************************************************/
547
548 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
549                                 prs_struct *current_pdu,
550                                 uint8 *p_ss_padding_len)
551 {
552         RPC_HDR_AUTH auth_info;
553         uint32 save_offset = prs_offset(current_pdu);
554         uint32 auth_len = prhdr->auth_len;
555         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
556         unsigned char *data = NULL;
557         size_t data_len;
558         unsigned char *full_packet_data = NULL;
559         size_t full_packet_data_len;
560         DATA_BLOB auth_blob;
561         NTSTATUS status;
562
563         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
564             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
565                 return NT_STATUS_OK;
566         }
567
568         if (!ntlmssp_state) {
569                 return NT_STATUS_INVALID_PARAMETER;
570         }
571
572         /* Ensure there's enough data for an authenticated response. */
573         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
574                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
575                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
576                         (unsigned int)auth_len ));
577                 return NT_STATUS_BUFFER_TOO_SMALL;
578         }
579
580         /*
581          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
582          * after the RPC header.
583          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
584          * functions as NTLMv2 checks the rpc headers also.
585          */
586
587         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
588         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
589
590         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
591         full_packet_data_len = prhdr->frag_len - auth_len;
592
593         /* Pull the auth header and the following data into a blob. */
594         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
595                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
596                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
597                 return NT_STATUS_BUFFER_TOO_SMALL;
598         }
599
600         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
601                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
602                 return NT_STATUS_BUFFER_TOO_SMALL;
603         }
604
605         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
606         auth_blob.length = auth_len;
607
608         switch (cli->auth->auth_level) {
609                 case PIPE_AUTH_LEVEL_PRIVACY:
610                         /* Data is encrypted. */
611                         status = ntlmssp_unseal_packet(ntlmssp_state,
612                                                         data, data_len,
613                                                         full_packet_data,
614                                                         full_packet_data_len,
615                                                         &auth_blob);
616                         if (!NT_STATUS_IS_OK(status)) {
617                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
618                                         "packet from %s. Error was %s.\n",
619                                         rpccli_pipe_txt(debug_ctx(), cli),
620                                         nt_errstr(status) ));
621                                 return status;
622                         }
623                         break;
624                 case PIPE_AUTH_LEVEL_INTEGRITY:
625                         /* Data is signed. */
626                         status = ntlmssp_check_packet(ntlmssp_state,
627                                                         data, data_len,
628                                                         full_packet_data,
629                                                         full_packet_data_len,
630                                                         &auth_blob);
631                         if (!NT_STATUS_IS_OK(status)) {
632                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
633                                         "packet from %s. Error was %s.\n",
634                                         rpccli_pipe_txt(debug_ctx(), cli),
635                                         nt_errstr(status) ));
636                                 return status;
637                         }
638                         break;
639                 default:
640                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
641                                   "auth level %d\n", cli->auth->auth_level));
642                         return NT_STATUS_INVALID_INFO_CLASS;
643         }
644
645         /*
646          * Return the current pointer to the data offset.
647          */
648
649         if(!prs_set_offset(current_pdu, save_offset)) {
650                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
651                         (unsigned int)save_offset ));
652                 return NT_STATUS_BUFFER_TOO_SMALL;
653         }
654
655         /*
656          * Remember the padding length. We must remove it from the real data
657          * stream once the sign/seal is done.
658          */
659
660         *p_ss_padding_len = auth_info.auth_pad_len;
661
662         return NT_STATUS_OK;
663 }
664
665 /****************************************************************************
666  schannel specific sign/seal.
667  ****************************************************************************/
668
669 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
670                                 prs_struct *current_pdu,
671                                 uint8 *p_ss_padding_len)
672 {
673         RPC_HDR_AUTH auth_info;
674         RPC_AUTH_SCHANNEL_CHK schannel_chk;
675         uint32 auth_len = prhdr->auth_len;
676         uint32 save_offset = prs_offset(current_pdu);
677         struct schannel_auth_struct *schannel_auth =
678                 cli->auth->a_u.schannel_auth;
679         uint32 data_len;
680
681         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
682             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
683                 return NT_STATUS_OK;
684         }
685
686         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
687                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
688                 return NT_STATUS_INVALID_PARAMETER;
689         }
690
691         if (!schannel_auth) {
692                 return NT_STATUS_INVALID_PARAMETER;
693         }
694
695         /* Ensure there's enough data for an authenticated response. */
696         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
697                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
698                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
699                         (unsigned int)auth_len ));
700                 return NT_STATUS_INVALID_PARAMETER;
701         }
702
703         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
704
705         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
706                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
707                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
708                 return NT_STATUS_BUFFER_TOO_SMALL;
709         }
710
711         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
712                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
713                 return NT_STATUS_BUFFER_TOO_SMALL;
714         }
715
716         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
717                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
718                         auth_info.auth_type));
719                 return NT_STATUS_BUFFER_TOO_SMALL;
720         }
721
722         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
723                                 &schannel_chk, current_pdu, 0)) {
724                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
725                 return NT_STATUS_BUFFER_TOO_SMALL;
726         }
727
728         if (!schannel_decode(schannel_auth,
729                         cli->auth->auth_level,
730                         SENDER_IS_ACCEPTOR,
731                         &schannel_chk,
732                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
733                         data_len)) {
734                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
735                                 "Connection to %s.\n",
736                                 rpccli_pipe_txt(debug_ctx(), cli)));
737                 return NT_STATUS_INVALID_PARAMETER;
738         }
739
740         /* The sequence number gets incremented on both send and receive. */
741         schannel_auth->seq_num++;
742
743         /*
744          * Return the current pointer to the data offset.
745          */
746
747         if(!prs_set_offset(current_pdu, save_offset)) {
748                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
749                         (unsigned int)save_offset ));
750                 return NT_STATUS_BUFFER_TOO_SMALL;
751         }
752
753         /*
754          * Remember the padding length. We must remove it from the real data
755          * stream once the sign/seal is done.
756          */
757
758         *p_ss_padding_len = auth_info.auth_pad_len;
759
760         return NT_STATUS_OK;
761 }
762
763 /****************************************************************************
764  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
765  ****************************************************************************/
766
767 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
768                                 prs_struct *current_pdu,
769                                 uint8 *p_ss_padding_len)
770 {
771         NTSTATUS ret = NT_STATUS_OK;
772
773         /* Paranioa checks for auth_len. */
774         if (prhdr->auth_len) {
775                 if (prhdr->auth_len > prhdr->frag_len) {
776                         return NT_STATUS_INVALID_PARAMETER;
777                 }
778
779                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
780                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
781                         /* Integer wrap attempt. */
782                         return NT_STATUS_INVALID_PARAMETER;
783                 }
784         }
785
786         /*
787          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
788          */
789
790         switch(cli->auth->auth_type) {
791                 case PIPE_AUTH_TYPE_NONE:
792                         if (prhdr->auth_len) {
793                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
794                                           "Connection to %s - got non-zero "
795                                           "auth len %u.\n",
796                                         rpccli_pipe_txt(debug_ctx(), cli),
797                                         (unsigned int)prhdr->auth_len ));
798                                 return NT_STATUS_INVALID_PARAMETER;
799                         }
800                         break;
801
802                 case PIPE_AUTH_TYPE_NTLMSSP:
803                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
804                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
805                         if (!NT_STATUS_IS_OK(ret)) {
806                                 return ret;
807                         }
808                         break;
809
810                 case PIPE_AUTH_TYPE_SCHANNEL:
811                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
812                         if (!NT_STATUS_IS_OK(ret)) {
813                                 return ret;
814                         }
815                         break;
816
817                 case PIPE_AUTH_TYPE_KRB5:
818                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
819                 default:
820                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
821                                   "to %s - unknown internal auth type %u.\n",
822                                   rpccli_pipe_txt(debug_ctx(), cli),
823                                   cli->auth->auth_type ));
824                         return NT_STATUS_INVALID_INFO_CLASS;
825         }
826
827         return NT_STATUS_OK;
828 }
829
830 /****************************************************************************
831  Do basic authentication checks on an incoming pdu.
832  ****************************************************************************/
833
834 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
835                         prs_struct *current_pdu,
836                         uint8 expected_pkt_type,
837                         char **ppdata,
838                         uint32 *pdata_len,
839                         prs_struct *return_data)
840 {
841
842         NTSTATUS ret = NT_STATUS_OK;
843         uint32 current_pdu_len = prs_data_size(current_pdu);
844
845         if (current_pdu_len != prhdr->frag_len) {
846                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
847                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
848                 return NT_STATUS_INVALID_PARAMETER;
849         }
850
851         /*
852          * Point the return values at the real data including the RPC
853          * header. Just in case the caller wants it.
854          */
855         *ppdata = prs_data_p(current_pdu);
856         *pdata_len = current_pdu_len;
857
858         /* Ensure we have the correct type. */
859         switch (prhdr->pkt_type) {
860                 case RPC_ALTCONTRESP:
861                 case RPC_BINDACK:
862
863                         /* Alter context and bind ack share the same packet definitions. */
864                         break;
865
866
867                 case RPC_RESPONSE:
868                 {
869                         RPC_HDR_RESP rhdr_resp;
870                         uint8 ss_padding_len = 0;
871
872                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
873                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
874                                 return NT_STATUS_BUFFER_TOO_SMALL;
875                         }
876
877                         /* Here's where we deal with incoming sign/seal. */
878                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
879                                         current_pdu, &ss_padding_len);
880                         if (!NT_STATUS_IS_OK(ret)) {
881                                 return ret;
882                         }
883
884                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
885                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
886
887                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
888                                 return NT_STATUS_BUFFER_TOO_SMALL;
889                         }
890
891                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
892
893                         /* Remember to remove the auth footer. */
894                         if (prhdr->auth_len) {
895                                 /* We've already done integer wrap tests on auth_len in
896                                         cli_pipe_validate_rpc_response(). */
897                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
898                                         return NT_STATUS_BUFFER_TOO_SMALL;
899                                 }
900                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
901                         }
902
903                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
904                                 current_pdu_len, *pdata_len, ss_padding_len ));
905
906                         /*
907                          * If this is the first reply, and the allocation hint is reasonably, try and
908                          * set up the return_data parse_struct to the correct size.
909                          */
910
911                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
912                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
913                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
914                                                 "too large to allocate\n",
915                                                 (unsigned int)rhdr_resp.alloc_hint ));
916                                         return NT_STATUS_NO_MEMORY;
917                                 }
918                         }
919
920                         break;
921                 }
922
923                 case RPC_BINDNACK:
924                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
925                                   "received from %s!\n",
926                                   rpccli_pipe_txt(debug_ctx(), cli)));
927                         /* Use this for now... */
928                         return NT_STATUS_NETWORK_ACCESS_DENIED;
929
930                 case RPC_FAULT:
931                 {
932                         RPC_HDR_RESP rhdr_resp;
933                         RPC_HDR_FAULT fault_resp;
934
935                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
936                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
937                                 return NT_STATUS_BUFFER_TOO_SMALL;
938                         }
939
940                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
941                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
942                                 return NT_STATUS_BUFFER_TOO_SMALL;
943                         }
944
945                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
946                                   "code %s received from %s!\n",
947                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
948                                 rpccli_pipe_txt(debug_ctx(), cli)));
949                         if (NT_STATUS_IS_OK(fault_resp.status)) {
950                                 return NT_STATUS_UNSUCCESSFUL;
951                         } else {
952                                 return fault_resp.status;
953                         }
954                 }
955
956                 default:
957                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
958                                 "from %s!\n",
959                                 (unsigned int)prhdr->pkt_type,
960                                 rpccli_pipe_txt(debug_ctx(), cli)));
961                         return NT_STATUS_INVALID_INFO_CLASS;
962         }
963
964         if (prhdr->pkt_type != expected_pkt_type) {
965                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
966                           "got an unexpected RPC packet type - %u, not %u\n",
967                         rpccli_pipe_txt(debug_ctx(), cli),
968                         prhdr->pkt_type,
969                         expected_pkt_type));
970                 return NT_STATUS_INVALID_INFO_CLASS;
971         }
972
973         /* Do this just before return - we don't want to modify any rpc header
974            data before now as we may have needed to do cryptographic actions on
975            it before. */
976
977         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
978                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
979                         "setting fragment first/last ON.\n"));
980                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
981         }
982
983         return NT_STATUS_OK;
984 }
985
986 /****************************************************************************
987  Ensure we eat the just processed pdu from the current_pdu prs_struct.
988  Normally the frag_len and buffer size will match, but on the first trans
989  reply there is a theoretical chance that buffer size > frag_len, so we must
990  deal with that.
991  ****************************************************************************/
992
993 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
994 {
995         uint32 current_pdu_len = prs_data_size(current_pdu);
996
997         if (current_pdu_len < prhdr->frag_len) {
998                 return NT_STATUS_BUFFER_TOO_SMALL;
999         }
1000
1001         /* Common case. */
1002         if (current_pdu_len == (uint32)prhdr->frag_len) {
1003                 prs_mem_free(current_pdu);
1004                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1005                 /* Make current_pdu dynamic with no memory. */
1006                 prs_give_memory(current_pdu, 0, 0, True);
1007                 return NT_STATUS_OK;
1008         }
1009
1010         /*
1011          * Oh no ! More data in buffer than we processed in current pdu.
1012          * Cheat. Move the data down and shrink the buffer.
1013          */
1014
1015         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1016                         current_pdu_len - prhdr->frag_len);
1017
1018         /* Remember to set the read offset back to zero. */
1019         prs_set_offset(current_pdu, 0);
1020
1021         /* Shrink the buffer. */
1022         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1023                 return NT_STATUS_BUFFER_TOO_SMALL;
1024         }
1025
1026         return NT_STATUS_OK;
1027 }
1028
1029 /****************************************************************************
1030  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1031 ****************************************************************************/
1032
1033 struct cli_api_pipe_state {
1034         struct event_context *ev;
1035         struct rpc_cli_transport *transport;
1036         uint8_t *rdata;
1037         uint32_t rdata_len;
1038 };
1039
1040 static void cli_api_pipe_trans_done(struct async_req *subreq);
1041 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1042 static void cli_api_pipe_read_done(struct tevent_req *subreq);
1043
1044 static struct tevent_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1045                                             struct event_context *ev,
1046                                             struct rpc_cli_transport *transport,
1047                                             uint8_t *data, size_t data_len,
1048                                             uint32_t max_rdata_len)
1049 {
1050         struct tevent_req *req;
1051         struct async_req *subreq;
1052         struct tevent_req *subreq2;
1053         struct cli_api_pipe_state *state;
1054         NTSTATUS status;
1055
1056         req = tevent_req_create(mem_ctx, &state, struct cli_api_pipe_state);
1057         if (req == NULL) {
1058                 return NULL;
1059         }
1060         state->ev = ev;
1061         state->transport = transport;
1062
1063         if (max_rdata_len < RPC_HEADER_LEN) {
1064                 /*
1065                  * For a RPC reply we always need at least RPC_HEADER_LEN
1066                  * bytes. We check this here because we will receive
1067                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1068                  */
1069                 status = NT_STATUS_INVALID_PARAMETER;
1070                 goto post_status;
1071         }
1072
1073         if (transport->trans_send != NULL) {
1074                 subreq = transport->trans_send(state, ev, data, data_len,
1075                                                max_rdata_len, transport->priv);
1076                 if (subreq == NULL) {
1077                         status = NT_STATUS_NO_MEMORY;
1078                         goto post_status;
1079                 }
1080                 subreq->async.fn = cli_api_pipe_trans_done;
1081                 subreq->async.priv = req;
1082                 return req;
1083         }
1084
1085         /*
1086          * If the transport does not provide a "trans" routine, i.e. for
1087          * example the ncacn_ip_tcp transport, do the write/read step here.
1088          */
1089
1090         subreq2 = rpc_write_send(state, ev, transport, data, data_len);
1091         if (subreq2 == NULL) {
1092                 goto fail;
1093         }
1094         tevent_req_set_callback(subreq2, cli_api_pipe_write_done, req);
1095         return req;
1096
1097         status = NT_STATUS_INVALID_PARAMETER;
1098
1099  post_status:
1100         if (NT_STATUS_IS_OK(status)) {
1101                 tevent_req_done(req);
1102         } else {
1103                 tevent_req_nterror(req, status);
1104         }
1105         return tevent_req_post(req, ev);
1106  fail:
1107         TALLOC_FREE(req);
1108         return NULL;
1109 }
1110
1111 static void cli_api_pipe_trans_done(struct async_req *subreq)
1112 {
1113         struct tevent_req *req = talloc_get_type_abort(
1114                 subreq->async.priv, struct tevent_req);
1115         struct cli_api_pipe_state *state = tevent_req_data(
1116                 req, struct cli_api_pipe_state);
1117         NTSTATUS status;
1118
1119         status = state->transport->trans_recv(subreq, state, &state->rdata,
1120                                               &state->rdata_len);
1121         TALLOC_FREE(subreq);
1122         if (!NT_STATUS_IS_OK(status)) {
1123                 tevent_req_nterror(req, status);
1124                 return;
1125         }
1126         tevent_req_done(req);
1127 }
1128
1129 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1130 {
1131         struct tevent_req *req = tevent_req_callback_data(
1132                 subreq, struct tevent_req);
1133         struct cli_api_pipe_state *state = tevent_req_data(
1134                 req, struct cli_api_pipe_state);
1135         NTSTATUS status;
1136
1137         status = rpc_write_recv(subreq);
1138         TALLOC_FREE(subreq);
1139         if (!NT_STATUS_IS_OK(status)) {
1140                 tevent_req_nterror(req, status);
1141                 return;
1142         }
1143
1144         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1145         if (tevent_req_nomem(state->rdata, req)) {
1146                 return;
1147         }
1148
1149         /*
1150          * We don't need to use rpc_read_send here, the upper layer will cope
1151          * with a short read, transport->trans_send could also return less
1152          * than state->max_rdata_len.
1153          */
1154         subreq = state->transport->read_send(state, state->ev, state->rdata,
1155                                              RPC_HEADER_LEN,
1156                                              state->transport->priv);
1157         if (tevent_req_nomem(subreq, req)) {
1158                 return;
1159         }
1160         tevent_req_set_callback(subreq, cli_api_pipe_read_done, req);
1161 }
1162
1163 static void cli_api_pipe_read_done(struct tevent_req *subreq)
1164 {
1165         struct tevent_req *req = tevent_req_callback_data(
1166                 subreq, struct tevent_req);
1167         struct cli_api_pipe_state *state = tevent_req_data(
1168                 req, struct cli_api_pipe_state);
1169         NTSTATUS status;
1170         ssize_t received;
1171
1172         status = state->transport->read_recv(subreq, &received);
1173         TALLOC_FREE(subreq);
1174         if (!NT_STATUS_IS_OK(status)) {
1175                 tevent_req_nterror(req, status);
1176                 return;
1177         }
1178         state->rdata_len = received;
1179         tevent_req_done(req);
1180 }
1181
1182 static NTSTATUS cli_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1183                                   uint8_t **prdata, uint32_t *prdata_len)
1184 {
1185         struct cli_api_pipe_state *state = tevent_req_data(
1186                 req, struct cli_api_pipe_state);
1187         NTSTATUS status;
1188
1189         if (tevent_req_is_nterror(req, &status)) {
1190                 return status;
1191         }
1192
1193         *prdata = talloc_move(mem_ctx, &state->rdata);
1194         *prdata_len = state->rdata_len;
1195         return NT_STATUS_OK;
1196 }
1197
1198 /****************************************************************************
1199  Send data on an rpc pipe via trans. The prs_struct data must be the last
1200  pdu fragment of an NDR data stream.
1201
1202  Receive response data from an rpc pipe, which may be large...
1203
1204  Read the first fragment: unfortunately have to use SMBtrans for the first
1205  bit, then SMBreadX for subsequent bits.
1206
1207  If first fragment received also wasn't the last fragment, continue
1208  getting fragments until we _do_ receive the last fragment.
1209
1210  Request/Response PDU's look like the following...
1211
1212  |<------------------PDU len----------------------------------------------->|
1213  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1214
1215  +------------+-----------------+-------------+---------------+-------------+
1216  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1217  +------------+-----------------+-------------+---------------+-------------+
1218
1219  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1220  signing & sealing being negotiated.
1221
1222  ****************************************************************************/
1223
/* Per-request state for rpc_api_pipe_send/recv: assembles a complete
 * multi-fragment reply PDU from individual fragments. */
struct rpc_api_pipe_state {
	struct event_context *ev;	/* Event context for the async steps. */
	struct rpc_pipe_client *cli;	/* Pipe we're talking over. */
	uint8_t expected_pkt_type;	/* Packet type the reply must carry. */

	prs_struct incoming_frag;	/* Buffer holding the fragment being read. */
	struct rpc_hdr_info rhdr;	/* Header of that fragment - filled via get_complete_frag. */

	prs_struct incoming_pdu;	/* Incoming reply */
	uint32_t incoming_pdu_offset;	/* Bytes of payload copied into incoming_pdu so far. */
};
1235
/* talloc destructor: the prs_struct buffers are not talloc-managed
 * (prs_struct doesn't know about talloc), so free them explicitly when
 * the state goes away. */
static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
{
	prs_mem_free(&state->incoming_frag);
	prs_mem_free(&state->incoming_pdu);
	return 0;
}
1242
1243 static void rpc_api_pipe_trans_done(struct tevent_req *subreq);
1244 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
1245
/****************************************************************************
 Send one marshalled request PDU (in 'data', up to prs_offset(data) bytes)
 and kick off reception/reassembly of the complete reply PDU. Result is
 collected with rpc_api_pipe_recv.
 ****************************************************************************/

static struct tevent_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    struct rpc_pipe_client *cli,
					    prs_struct *data, /* Outgoing PDU */
					    uint8_t expected_pkt_type)
{
	struct tevent_req *req, *subreq;
	struct rpc_api_pipe_state *state;
	uint16_t max_recv_frag;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct rpc_api_pipe_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->expected_pkt_type = expected_pkt_type;
	state->incoming_pdu_offset = 0;

	/* incoming_frag accumulates one fragment at a time. */
	prs_init_empty(&state->incoming_frag, state, UNMARSHALL);

	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
	/* Make incoming_pdu dynamic with no memory. */
	prs_give_memory(&state->incoming_pdu, NULL, 0, true);

	/* Both prs_structs need explicit freeing - see the destructor. */
	talloc_set_destructor(state, rpc_api_pipe_state_destructor);

	/*
	 * Ensure we're not sending too much.
	 */
	if (prs_offset(data) > cli->max_xmit_frag) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));

	max_recv_frag = cli->max_recv_frag;

#ifdef DEVELOPER
	/* In developer builds deliberately request a tiny, randomised
	 * receive size to exercise the multi-fragment reassembly path. */
	max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
#endif

	subreq = cli_api_pipe_send(state, ev, cli->transport,
				   (uint8_t *)prs_data_p(data),
				   prs_offset(data), max_recv_frag);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_api_pipe_trans_done, req);
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
1306
/* Callback: the request went out and the first chunk of the reply is in.
 * Seed incoming_frag with it and ask for a complete first fragment. */
static void rpc_api_pipe_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_state *state = tevent_req_data(
		req, struct rpc_api_pipe_state);
	NTSTATUS status;
	uint8_t *rdata = NULL;
	uint32_t rdata_len = 0;
	char *rdata_copy;

	status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
		tevent_req_nterror(req, status);
		return;
	}

	/* No reply data at all is treated as a successful, empty result. */
	if (rdata == NULL) {
		DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
			 rpccli_pipe_txt(debug_ctx(), state->cli)));
		tevent_req_done(req);
		return;
	}

	/*
	 * Give the memory received from cli_trans as dynamic to the current
	 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
	 * :-(
	 */
	rdata_copy = (char *)memdup(rdata, rdata_len);
	TALLOC_FREE(rdata);
	if (tevent_req_nomem(rdata_copy, req)) {
		return;
	}
	/* incoming_frag now owns rdata_copy (dynamic = true). */
	prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);

	/* Ensure we have enough data for a pdu. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
}
1353
/* Callback: one complete fragment is in incoming_frag. Validate it,
 * append its payload to incoming_pdu, then either finish (last fragment)
 * or loop round for the next fragment. */
static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_state *state = tevent_req_data(
		req, struct rpc_api_pipe_state);
	NTSTATUS status;
	char *rdata = NULL;
	uint32_t rdata_len = 0;

	status = get_complete_frag_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("get_complete_frag failed: %s\n",
			  nt_errstr(status)));
		tevent_req_nterror(req, status);
		return;
	}

	/* Auth checks (sign/unseal) and locate the payload in the fragment. */
	status = cli_pipe_validate_current_pdu(
		state->cli, &state->rhdr, &state->incoming_frag,
		state->expected_pkt_type, &rdata, &rdata_len,
		&state->incoming_pdu);

	DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
		  (unsigned)prs_data_size(&state->incoming_frag),
		  (unsigned)state->incoming_pdu_offset,
		  nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if ((state->rhdr.flags & RPC_FLG_FIRST)
	    && (state->rhdr.pack_type[0] == 0)) {
		/*
		 * Set the data type correctly for big-endian data on the
		 * first packet.
		 */
		DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
			  "big-endian.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli)));
		prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
	}
	/*
	 * Check endianness on subsequent packets.
	 */
	if (state->incoming_frag.bigendian_data
	    != state->incoming_pdu.bigendian_data) {
		DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
			 "%s\n",
			 state->incoming_pdu.bigendian_data?"big":"little",
			 state->incoming_frag.bigendian_data?"big":"little"));
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	/* Now copy the data portion out of the pdu into rbuf. */
	if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return;
	}

	memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
	       rdata, (size_t)rdata_len);
	state->incoming_pdu_offset += rdata_len;

	/* Consume this fragment, keeping any over-read bytes for the next. */
	status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
					    &state->incoming_frag);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (state->rhdr.flags & RPC_FLG_LAST) {
		/* Last fragment - the reply PDU is fully assembled. */
		DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  (unsigned)prs_data_size(&state->incoming_pdu)));
		tevent_req_done(req);
		return;
	}

	/* More fragments to come - go read the next one. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
}
1444
/* Collect the result of rpc_api_pipe_send: the fully reassembled reply
 * PDU. Ownership of the PDU buffer moves to the caller's mem_ctx. */
static NTSTATUS rpc_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
				  prs_struct *reply_pdu)
{
	struct rpc_api_pipe_state *state = tevent_req_data(
		req, struct rpc_api_pipe_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	/* Struct-copy the PDU out and reparent it to the caller. */
	*reply_pdu = state->incoming_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->incoming_pdu from being freed in
	 * rpc_api_pipe_state_destructor()
	 */
	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
1467
1468 /*******************************************************************
1469  Creates krb5 auth bind.
1470  ********************************************************************/
1471
1472 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1473                                                 enum pipe_auth_level auth_level,
1474                                                 RPC_HDR_AUTH *pauth_out,
1475                                                 prs_struct *auth_data)
1476 {
1477 #ifdef HAVE_KRB5
1478         int ret;
1479         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1480         DATA_BLOB tkt = data_blob_null;
1481         DATA_BLOB tkt_wrapped = data_blob_null;
1482
1483         /* We may change the pad length before marshalling. */
1484         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1485
1486         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1487                 a->service_principal ));
1488
1489         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1490
1491         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1492                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1493
1494         if (ret) {
1495                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1496                         "failed with %s\n",
1497                         a->service_principal,
1498                         error_message(ret) ));
1499
1500                 data_blob_free(&tkt);
1501                 prs_mem_free(auth_data);
1502                 return NT_STATUS_INVALID_PARAMETER;
1503         }
1504
1505         /* wrap that up in a nice GSS-API wrapping */
1506         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1507
1508         data_blob_free(&tkt);
1509
1510         /* Auth len in the rpc header doesn't include auth_header. */
1511         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1512                 data_blob_free(&tkt_wrapped);
1513                 prs_mem_free(auth_data);
1514                 return NT_STATUS_NO_MEMORY;
1515         }
1516
1517         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1518         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1519
1520         data_blob_free(&tkt_wrapped);
1521         return NT_STATUS_OK;
1522 #else
1523         return NT_STATUS_INVALID_PARAMETER;
1524 #endif
1525 }
1526
1527 /*******************************************************************
1528  Creates SPNEGO NTLMSSP auth bind.
1529  ********************************************************************/
1530
static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
						enum pipe_auth_level auth_level,
						RPC_HDR_AUTH *pauth_out,
						prs_struct *auth_data)
{
	NTSTATUS nt_status;
	DATA_BLOB null_blob = data_blob_null;
	DATA_BLOB request = data_blob_null;
	DATA_BLOB spnego_msg = data_blob_null;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);

	DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
	nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
					null_blob,
					&request);

	/* The first leg of an NTLMSSP exchange must ask for more
	   processing; any other status is a failure.  On failure
	   auth_data is freed here on behalf of the caller. */
	if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
		data_blob_free(&request);
		prs_mem_free(auth_data);
		return nt_status;
	}

	/* Wrap this in SPNEGO. */
	spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);

	data_blob_free(&request);

	/* Auth len in the rpc header doesn't include auth_header. */
	if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
		data_blob_free(&spnego_msg);
		prs_mem_free(auth_data);
		return NT_STATUS_NO_MEMORY;
	}

	DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
	dump_data(5, spnego_msg.data, spnego_msg.length);

	data_blob_free(&spnego_msg);
	return NT_STATUS_OK;
}
1573
1574 /*******************************************************************
1575  Creates NTLMSSP auth bind.
1576  ********************************************************************/
1577
1578 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1579                                                 enum pipe_auth_level auth_level,
1580                                                 RPC_HDR_AUTH *pauth_out,
1581                                                 prs_struct *auth_data)
1582 {
1583         NTSTATUS nt_status;
1584         DATA_BLOB null_blob = data_blob_null;
1585         DATA_BLOB request = data_blob_null;
1586
1587         /* We may change the pad length before marshalling. */
1588         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1589
1590         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1591         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1592                                         null_blob,
1593                                         &request);
1594
1595         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1596                 data_blob_free(&request);
1597                 prs_mem_free(auth_data);
1598                 return nt_status;
1599         }
1600
1601         /* Auth len in the rpc header doesn't include auth_header. */
1602         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1603                 data_blob_free(&request);
1604                 prs_mem_free(auth_data);
1605                 return NT_STATUS_NO_MEMORY;
1606         }
1607
1608         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1609         dump_data(5, request.data, request.length);
1610
1611         data_blob_free(&request);
1612         return NT_STATUS_OK;
1613 }
1614
1615 /*******************************************************************
1616  Creates schannel auth bind.
1617  ********************************************************************/
1618
static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
						enum pipe_auth_level auth_level,
						RPC_HDR_AUTH *pauth_out,
						prs_struct *auth_data)
{
	RPC_AUTH_SCHANNEL_NEG schannel_neg;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);

	/* Use lp_workgroup() if domain not specified */

	if (!cli->auth->domain || !cli->auth->domain[0]) {
		/* NOTE(review): strdup'd onto cli, not cli->auth — confirm
		   the intended talloc owner of the domain string. */
		cli->auth->domain = talloc_strdup(cli, lp_workgroup());
		if (cli->auth->domain == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
	}

	/* The negotiate blob carries our domain and netbios name. */
	init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
				   global_myname());

	/*
	 * Now marshall the data into the auth parse_struct.
	 */

	if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
				       &schannel_neg, auth_data, 0)) {
		DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
		/* auth_data is freed here on behalf of the caller. */
		prs_mem_free(auth_data);
		return NT_STATUS_NO_MEMORY;
	}

	return NT_STATUS_OK;
}
1654
1655 /*******************************************************************
1656  Creates the internals of a DCE/RPC bind request or alter context PDU.
1657  ********************************************************************/
1658
static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
						prs_struct *rpc_out, 
						uint32 rpc_call_id,
						const RPC_IFACE *abstract,
						const RPC_IFACE *transfer,
						RPC_HDR_AUTH *phdr_auth,
						prs_struct *pauth_info)
{
	RPC_HDR hdr;
	RPC_HDR_RB hdr_rb;
	RPC_CONTEXT rpc_ctx;
	/* Length of pre-marshalled auth data; zero for anonymous binds. */
	uint16 auth_len = prs_offset(pauth_info);
	uint8 ss_padding_len = 0;
	uint16 frag_len = 0;

	/* create the RPC context. */
	init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);

	/* create the bind request RPC_HDR_RB */
	init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);

	/* Start building the frag length. */
	frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);

	/* Do we need to pad ?  The auth trailer must start on an
	   8-byte boundary, so pad the data portion up to that. */
	if (auth_len) {
		uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
		if (data_len % 8) {
			ss_padding_len = 8 - (data_len % 8);
			phdr_auth->auth_pad_len = ss_padding_len;
		}
		frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
	}

	/* Create the request RPC_HDR */
	init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);

	/* Marshall the RPC header */
	if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Marshall the bind request data */
	if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Grow the outgoing buffer to store any auth info.
	 * Wire order is: zero padding, auth header, then auth data.
	 */

	if(auth_len != 0) {
		if (ss_padding_len) {
			char pad[8];
			memset(pad, '\0', 8);
			if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
				DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
				return NT_STATUS_NO_MEMORY;
			}
		}

		if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
			return NT_STATUS_NO_MEMORY;
		}


		if(!prs_append_prs_data( rpc_out, pauth_info)) {
			DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
			return NT_STATUS_NO_MEMORY;
		}
	}

	return NT_STATUS_OK;
}
1736
1737 /*******************************************************************
1738  Creates a DCE/RPC bind request.
1739  ********************************************************************/
1740
1741 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1742                                 prs_struct *rpc_out, 
1743                                 uint32 rpc_call_id,
1744                                 const RPC_IFACE *abstract,
1745                                 const RPC_IFACE *transfer,
1746                                 enum pipe_auth_type auth_type,
1747                                 enum pipe_auth_level auth_level)
1748 {
1749         RPC_HDR_AUTH hdr_auth;
1750         prs_struct auth_info;
1751         NTSTATUS ret = NT_STATUS_OK;
1752
1753         ZERO_STRUCT(hdr_auth);
1754         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1755                 return NT_STATUS_NO_MEMORY;
1756
1757         switch (auth_type) {
1758                 case PIPE_AUTH_TYPE_SCHANNEL:
1759                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1760                         if (!NT_STATUS_IS_OK(ret)) {
1761                                 prs_mem_free(&auth_info);
1762                                 return ret;
1763                         }
1764                         break;
1765
1766                 case PIPE_AUTH_TYPE_NTLMSSP:
1767                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1768                         if (!NT_STATUS_IS_OK(ret)) {
1769                                 prs_mem_free(&auth_info);
1770                                 return ret;
1771                         }
1772                         break;
1773
1774                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1775                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1776                         if (!NT_STATUS_IS_OK(ret)) {
1777                                 prs_mem_free(&auth_info);
1778                                 return ret;
1779                         }
1780                         break;
1781
1782                 case PIPE_AUTH_TYPE_KRB5:
1783                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1784                         if (!NT_STATUS_IS_OK(ret)) {
1785                                 prs_mem_free(&auth_info);
1786                                 return ret;
1787                         }
1788                         break;
1789
1790                 case PIPE_AUTH_TYPE_NONE:
1791                         break;
1792
1793                 default:
1794                         /* "Can't" happen. */
1795                         return NT_STATUS_INVALID_INFO_CLASS;
1796         }
1797
1798         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1799                                                 rpc_out, 
1800                                                 rpc_call_id,
1801                                                 abstract,
1802                                                 transfer,
1803                                                 &hdr_auth,
1804                                                 &auth_info);
1805
1806         prs_mem_free(&auth_info);
1807         return ret;
1808 }
1809
1810 /*******************************************************************
1811  Create and add the NTLMSSP sign/seal auth header and data.
1812  ********************************************************************/
1813
static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
					RPC_HDR *phdr,
					uint32 ss_padding_len,
					prs_struct *outgoing_pdu)
{
	RPC_HDR_AUTH auth_info;
	NTSTATUS status;
	DATA_BLOB auth_blob = data_blob_null;
	/* Everything after the response headers, including ss padding. */
	uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;

	if (!cli->auth->a_u.ntlmssp_state) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Init and marshall the auth header. */
	init_rpc_hdr_auth(&auth_info,
			map_pipe_auth_type_to_rpc_auth_type(
				cli->auth->auth_type),
			cli->auth->auth_level,
			ss_padding_len,
			1 /* context id. */);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Data portion is encrypted in place; the whole PDU
			   marshalled so far (including the auth header) is
			   covered by the signature in auth_blob. */
			status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		case PIPE_AUTH_LEVEL_INTEGRITY:
			/* Data is signed but left in the clear. */
			status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
					(unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
					data_and_pad_len,
					(unsigned char *)prs_data_p(outgoing_pdu),
					(size_t)prs_offset(outgoing_pdu),
					&auth_blob);
			if (!NT_STATUS_IS_OK(status)) {
				data_blob_free(&auth_blob);
				return status;
			}
			break;

		default:
			/* Can't happen. */
			smb_panic("bad auth level");
			/* Notreached. */
			return NT_STATUS_INVALID_PARAMETER;
	}

	/* Finally marshall the blob. */

	if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
		DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
			(unsigned int)NTLMSSP_SIG_SIZE));
		data_blob_free(&auth_blob);
		return NT_STATUS_NO_MEMORY;
	}

	data_blob_free(&auth_blob);
	return NT_STATUS_OK;
}
1890
1891 /*******************************************************************
1892  Create and add the schannel sign/seal auth header and data.
1893  ********************************************************************/
1894
1895 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1896                                         RPC_HDR *phdr,
1897                                         uint32 ss_padding_len,
1898                                         prs_struct *outgoing_pdu)
1899 {
1900         RPC_HDR_AUTH auth_info;
1901         RPC_AUTH_SCHANNEL_CHK verf;
1902         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1903         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1904         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1905
1906         if (!sas) {
1907                 return NT_STATUS_INVALID_PARAMETER;
1908         }
1909
1910         /* Init and marshall the auth header. */
1911         init_rpc_hdr_auth(&auth_info,
1912                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1913                         cli->auth->auth_level,
1914                         ss_padding_len,
1915                         1 /* context id. */);
1916
1917         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1918                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1919                 return NT_STATUS_NO_MEMORY;
1920         }
1921
1922         switch (cli->auth->auth_level) {
1923                 case PIPE_AUTH_LEVEL_PRIVACY:
1924                 case PIPE_AUTH_LEVEL_INTEGRITY:
1925                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1926                                 sas->seq_num));
1927
1928                         schannel_encode(sas,
1929                                         cli->auth->auth_level,
1930                                         SENDER_IS_INITIATOR,
1931                                         &verf,
1932                                         data_p,
1933                                         data_and_pad_len);
1934
1935                         sas->seq_num++;
1936                         break;
1937
1938                 default:
1939                         /* Can't happen. */
1940                         smb_panic("bad auth level");
1941                         /* Notreached. */
1942                         return NT_STATUS_INVALID_PARAMETER;
1943         }
1944
1945         /* Finally marshall the blob. */
1946         smb_io_rpc_auth_schannel_chk("",
1947                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1948                         &verf,
1949                         outgoing_pdu,
1950                         0);
1951
1952         return NT_STATUS_OK;
1953 }
1954
1955 /*******************************************************************
1956  Calculate how much data we're going to send in this packet, also
1957  work out any sign/seal padding length.
1958  ********************************************************************/
1959
static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
					uint32 data_left,
					uint16 *p_frag_len,
					uint16 *p_auth_len,
					uint32 *p_ss_padding)
{
	uint32 data_space, data_len;

#ifdef DEVELOPER
	/* In developer builds, randomly shrink the payload to
	   exercise the multi-fragment code paths more often. */
	if ((data_left > 0) && (sys_random() % 2)) {
		data_left = MAX(data_left/2, 1);
	}
#endif

	switch (cli->auth->auth_level) {
		case PIPE_AUTH_LEVEL_NONE:
		case PIPE_AUTH_LEVEL_CONNECT:
			/* No auth trailer: the whole fragment after the
			   headers is payload. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			*p_auth_len = 0;
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
			return data_len;

		case PIPE_AUTH_LEVEL_INTEGRITY:
		case PIPE_AUTH_LEVEL_PRIVACY:
			/* Treat the same for all authenticated rpc requests. */
			switch(cli->auth->auth_type) {
				case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
				case PIPE_AUTH_TYPE_NTLMSSP:
					*p_auth_len = NTLMSSP_SIG_SIZE;
					break;
				case PIPE_AUTH_TYPE_SCHANNEL:
					*p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
					break;
				default:
					smb_panic("bad auth type");
					break;
			}

			/* Reserve room for the auth header and trailer. */
			data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
						RPC_HDR_AUTH_LEN - *p_auth_len;

			data_len = MIN(data_space, data_left);
			*p_ss_padding = 0;
			/* Pad the data portion to an 8-byte boundary so the
			   auth trailer is aligned. */
			if (data_len % 8) {
				*p_ss_padding = 8 - (data_len % 8);
			}
			*p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +		/* Normal headers. */
					data_len + *p_ss_padding +		/* data plus padding. */
					RPC_HDR_AUTH_LEN + *p_auth_len;		/* Auth header and auth data. */
			return data_len;

		default:
			smb_panic("bad auth level");
			/* Notreached. */
			return 0;
	}
}
2019
2020 /*******************************************************************
2021  External interface.
2022  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2023  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2024  and deals with signing/sealing details.
2025  ********************************************************************/
2026
/* Per-request state for the async rpc_api_pipe_req_send/_recv pair. */
struct rpc_api_pipe_req_state {
	struct event_context *ev;	/* event context driving the subreqs */
	struct rpc_pipe_client *cli;	/* pipe the request is sent on */
	uint8_t op_num;			/* RPC opnum being called */
	uint32_t call_id;		/* call id shared by all fragments */
	prs_struct *req_data;		/* caller-owned marshalled request */
	uint32_t req_data_sent;		/* bytes of req_data sent so far */
	prs_struct outgoing_frag;	/* scratch buffer for one fragment */
	prs_struct reply_pdu;		/* assembled response, handed to _recv */
};
2037
2038 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2039 {
2040         prs_mem_free(&s->outgoing_frag);
2041         prs_mem_free(&s->reply_pdu);
2042         return 0;
2043 }
2044
2045 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2046 static void rpc_api_pipe_req_done(struct tevent_req *subreq);
2047 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2048                                   bool *is_last_frag);
2049
/* Kick off an async RPC request: marshall the first fragment and either
   send it and wait for the response (single-fragment case) or write it
   and continue fragmenting from the write-done callback. */
struct tevent_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct rpc_pipe_client *cli,
					 uint8_t op_num,
					 prs_struct *req_data)
{
	struct tevent_req *req, *subreq;
	struct rpc_api_pipe_req_state *state;
	NTSTATUS status;
	bool is_last_frag;

	req = tevent_req_create(mem_ctx, &state,
				struct rpc_api_pipe_req_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->op_num = op_num;
	state->req_data = req_data;
	state->req_data_sent = 0;
	state->call_id = get_rpc_call_id();

	/* A fragment must at least hold the headers plus the largest
	   possible sign/seal footer. */
	if (cli->max_xmit_frag
	    < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
		/* Server is screwed up ! */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
		      state, MARSHALL)) {
		goto fail;
	}

	talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	if (is_last_frag) {
		/* Only fragment: send it and collect the response. */
		subreq = rpc_api_pipe_send(state, ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
	} else {
		/* More fragments follow: just write this one; the
		   write-done callback prepares the next. */
		subreq = rpc_write_send(
			state, ev, cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
2122
/* Marshall the next request fragment into state->outgoing_frag:
   header, request header, payload slice, sign/seal padding and the
   auth footer.  Sets *is_last_frag when this fragment carries the
   RPC_FLG_LAST flag. */
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	RPC_HDR hdr;
	RPC_HDR_REQ hdr_req;
	uint32_t data_sent_thistime;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	uint32_t ss_padding;
	uint32_t data_left;
	char pad[8] = { 0, };
	NTSTATUS status;

	data_left = prs_offset(state->req_data) - state->req_data_sent;

	data_sent_thistime = calculate_data_len_tosend(
		state->cli, data_left, &frag_len, &auth_len, &ss_padding);

	if (state->req_data_sent == 0) {
		flags = RPC_FLG_FIRST;
	}

	if (data_sent_thistime == data_left) {
		flags |= RPC_FLG_LAST;
	}

	/* Rewind the scratch buffer; it is reused for every fragment. */
	if (!prs_set_offset(&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create and marshall the header and request header. */
	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
		     auth_len);

	if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the rpc request RPC_HDR_REQ */
	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
			 state->op_num);

	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
				&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy in the data, plus any ss padding. */
	if (!prs_append_some_prs_data(&state->outgoing_frag,
				      state->req_data, state->req_data_sent,
				      data_sent_thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy the sign/seal padding data (no-op when ss_padding is 0). */
	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Generate any auth sign/seal and add the auth footer. */
	switch (state->cli->auth->auth_type) {
	case PIPE_AUTH_TYPE_NONE:
		status = NT_STATUS_OK;
		break;
	case PIPE_AUTH_TYPE_NTLMSSP:
	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
						 &state->outgoing_frag);
		break;
	case PIPE_AUTH_TYPE_SCHANNEL:
		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
						  &state->outgoing_frag);
		break;
	default:
		status = NT_STATUS_INVALID_PARAMETER;
		break;
	}

	state->req_data_sent += data_sent_thistime;
	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);

	return status;
}
2207
/* Callback after one non-final fragment has been written: prepare the
   next fragment and either send it as the last one (awaiting the
   response) or write it and loop back here. */
static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;
	bool is_last_frag;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (is_last_frag) {
		/* Final fragment: send it and collect the response PDU. */
		subreq = rpc_api_pipe_send(state, state->ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
	} else {
		/* More fragments to go: write this one and re-enter here. */
		subreq = rpc_write_send(
			state, state->ev,
			state->cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
}
2251
2252 static void rpc_api_pipe_req_done(struct tevent_req *subreq)
2253 {
2254         struct tevent_req *req = tevent_req_callback_data(
2255                 subreq, struct tevent_req);
2256         struct rpc_api_pipe_req_state *state = tevent_req_data(
2257                 req, struct rpc_api_pipe_req_state);
2258         NTSTATUS status;
2259
2260         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2261         TALLOC_FREE(subreq);
2262         if (!NT_STATUS_IS_OK(status)) {
2263                 tevent_req_nterror(req, status);
2264                 return;
2265         }
2266         tevent_req_done(req);
2267 }
2268
/*
 * Collect the result of an async rpc_api_pipe_req request.
 *
 * On success the received reply PDU is handed over to the caller and
 * re-parented onto mem_ctx. On failure reply_pdu is still initialized
 * (empty), because the rpccli_* caller routines expect that.
 */
NTSTATUS rpc_api_pipe_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
			       prs_struct *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		/*
		 * We always have to initialize to reply pdu, even if there is
		 * none. The rpccli_* caller routines expect this.
		 */
		prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
		return status;
	}

	*reply_pdu = state->reply_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->reply_pdu from being freed in
	 * rpc_api_pipe_req_state_destructor()
	 */
	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
2296
2297 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2298                         uint8 op_num,
2299                         prs_struct *in_data,
2300                         prs_struct *out_data)
2301 {
2302         TALLOC_CTX *frame = talloc_stackframe();
2303         struct event_context *ev;
2304         struct tevent_req *req;
2305         NTSTATUS status = NT_STATUS_NO_MEMORY;
2306
2307         ev = event_context_init(frame);
2308         if (ev == NULL) {
2309                 goto fail;
2310         }
2311
2312         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2313         if (req == NULL) {
2314                 goto fail;
2315         }
2316
2317         tevent_req_poll(req, ev);
2318
2319         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2320  fail:
2321         TALLOC_FREE(frame);
2322         return status;
2323 }
2324
2325 #if 0
2326 /****************************************************************************
2327  Set the handle state.
2328 ****************************************************************************/
2329
2330 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2331                                    const char *pipe_name, uint16 device_state)
2332 {
2333         bool state_set = False;
2334         char param[2];
2335         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2336         char *rparam = NULL;
2337         char *rdata = NULL;
2338         uint32 rparam_len, rdata_len;
2339
2340         if (pipe_name == NULL)
2341                 return False;
2342
2343         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2344                  cli->fnum, pipe_name, device_state));
2345
2346         /* create parameters: device state */
2347         SSVAL(param, 0, device_state);
2348
2349         /* create setup parameters. */
2350         setup[0] = 0x0001; 
2351         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2352
2353         /* send the data on \PIPE\ */
2354         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2355                     setup, 2, 0,                /* setup, length, max */
2356                     param, 2, 0,                /* param, length, max */
2357                     NULL, 0, 1024,              /* data, length, max */
2358                     &rparam, &rparam_len,        /* return param, length */
2359                     &rdata, &rdata_len))         /* return data, length */
2360         {
2361                 DEBUG(5, ("Set Handle state: return OK\n"));
2362                 state_set = True;
2363         }
2364
2365         SAFE_FREE(rparam);
2366         SAFE_FREE(rdata);
2367
2368         return state_set;
2369 }
2370 #endif
2371
2372 /****************************************************************************
2373  Check the rpc bind acknowledge response.
2374 ****************************************************************************/
2375
2376 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2377 {
2378         if ( hdr_ba->addr.len == 0) {
2379                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2380         }
2381
2382         /* check the transfer syntax */
2383         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2384              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2385                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2386                 return False;
2387         }
2388
2389         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2390                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2391                           hdr_ba->res.num_results, hdr_ba->res.reason));
2392         }
2393
2394         DEBUG(5,("check_bind_response: accepted!\n"));
2395         return True;
2396 }
2397
2398 /*******************************************************************
2399  Creates a DCE/RPC bind authentication response.
2400  This is the packet that is sent back to the server once we
2401  have received a BIND-ACK, to finish the third leg of
2402  the authentication handshake.
2403  ********************************************************************/
2404
2405 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2406                                 uint32 rpc_call_id,
2407                                 enum pipe_auth_type auth_type,
2408                                 enum pipe_auth_level auth_level,
2409                                 DATA_BLOB *pauth_blob,
2410                                 prs_struct *rpc_out)
2411 {
2412         RPC_HDR hdr;
2413         RPC_HDR_AUTH hdr_auth;
2414         uint32 pad = 0;
2415
2416         /* Create the request RPC_HDR */
2417         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2418                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2419                      pauth_blob->length );
2420
2421         /* Marshall it. */
2422         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2423                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2424                 return NT_STATUS_NO_MEMORY;
2425         }
2426
2427         /*
2428                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2429                 about padding - shouldn't this pad to length 8 ? JRA.
2430         */
2431
2432         /* 4 bytes padding. */
2433         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2434                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2435                 return NT_STATUS_NO_MEMORY;
2436         }
2437
2438         /* Create the request RPC_HDR_AUTHA */
2439         init_rpc_hdr_auth(&hdr_auth,
2440                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2441                         auth_level, 0, 1);
2442
2443         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2444                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2445                 return NT_STATUS_NO_MEMORY;
2446         }
2447
2448         /*
2449          * Append the auth data to the outgoing buffer.
2450          */
2451
2452         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2453                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2454                 return NT_STATUS_NO_MEMORY;
2455         }
2456
2457         return NT_STATUS_OK;
2458 }
2459
2460 /*******************************************************************
2461  Creates a DCE/RPC bind alter context authentication request which
2462  may contain a spnego auth blobl
2463  ********************************************************************/
2464
2465 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2466                                         const RPC_IFACE *abstract,
2467                                         const RPC_IFACE *transfer,
2468                                         enum pipe_auth_level auth_level,
2469                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2470                                         prs_struct *rpc_out)
2471 {
2472         RPC_HDR_AUTH hdr_auth;
2473         prs_struct auth_info;
2474         NTSTATUS ret = NT_STATUS_OK;
2475
2476         ZERO_STRUCT(hdr_auth);
2477         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2478                 return NT_STATUS_NO_MEMORY;
2479
2480         /* We may change the pad length before marshalling. */
2481         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2482
2483         if (pauth_blob->length) {
2484                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2485                         prs_mem_free(&auth_info);
2486                         return NT_STATUS_NO_MEMORY;
2487                 }
2488         }
2489
2490         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2491                                                 rpc_out, 
2492                                                 rpc_call_id,
2493                                                 abstract,
2494                                                 transfer,
2495                                                 &hdr_auth,
2496                                                 &auth_info);
2497         prs_mem_free(&auth_info);
2498         return ret;
2499 }
2500
2501 /****************************************************************************
2502  Do an rpc bind.
2503 ****************************************************************************/
2504
/* State carried across the legs of an async rpc_pipe_bind request. */
struct rpc_pipe_bind_state {
	struct event_context *ev;	/* event context driving the bind */
	struct rpc_pipe_client *cli;	/* the pipe being bound */
	prs_struct rpc_out;		/* marshalled outgoing PDU buffer */
	uint32_t rpc_call_id;		/* call id used for all bind legs */
};
2511
/* Talloc destructor: release the marshalling buffer held in rpc_out. */
static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
{
	prs_mem_free(&state->rpc_out);
	return 0;
}
2517
2518 static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq);
2519 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2520                                            struct rpc_pipe_bind_state *state,
2521                                            struct rpc_hdr_info *phdr,
2522                                            prs_struct *reply_pdu);
2523 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2524 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2525                                                     struct rpc_pipe_bind_state *state,
2526                                                     struct rpc_hdr_info *phdr,
2527                                                     prs_struct *reply_pdu);
2528 static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq);
2529
/*
 * Start an async DCE/RPC bind on an open pipe.
 *
 * Takes ownership of "auth" (moved onto cli), marshalls a bind PDU for
 * the pipe's abstract/transfer syntax and sends it. The BINDACK is
 * handled in rpc_pipe_bind_step_one_done(); completion is reported via
 * rpc_pipe_bind_recv().
 */
struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
				     struct event_context *ev,
				     struct rpc_pipe_client *cli,
				     struct cli_pipe_auth_data *auth)
{
	struct async_req *result;
	struct tevent_req *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_pipe_bind_state)) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
		rpccli_pipe_txt(debug_ctx(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();

	/* rpc_out is freed by rpc_pipe_bind_state_destructor(). */
	prs_init_empty(&state->rpc_out, state, MARSHALL);
	talloc_set_destructor(state, rpc_pipe_bind_state_destructor);

	/* The pipe takes ownership of the auth data. */
	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(cli, &state->rpc_out,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     cli->auth->auth_type,
				     cli->auth->auth_level);

	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   RPC_BINDACK);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_pipe_bind_step_one_done, result);
	return result;

 post_status:
	if (async_post_ntstatus(result, ev, status)) {
		return result;
	}
 fail:
	TALLOC_FREE(result);
	return NULL;
}
2587
/*
 * Second leg of the bind: parse and validate the BINDACK, then either
 * finish (no auth / schannel) or continue with an AUTH3 (raw NTLMSSP)
 * or alter context (SPNEGO-NTLMSSP) exchange.
 */
static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq)
{
	struct async_req *req = tevent_req_callback_data(
		subreq, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_ba_info hdr_ba;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  nt_errstr(status)));
		async_req_nterror(req, status);
		return;
	}

	/* Unmarshall the RPC header */
	if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
			  "RPC_HDR_BA.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
		DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Remember the fragment sizes the server negotiated. */
	state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
	state->cli->max_recv_frag = hdr_ba.bba.max_rsize;

	/*
	 * For authenticated binds we may need to do 3 or 4 leg binds.
	 */

	switch(state->cli->auth->auth_type) {

	case PIPE_AUTH_TYPE_NONE:
	case PIPE_AUTH_TYPE_SCHANNEL:
		/* Bind complete. */
		prs_mem_free(&reply_pdu);
		async_req_done(req);
		break;

	case PIPE_AUTH_TYPE_NTLMSSP:
		/* Need to send AUTH3 packet - no reply. */
		status = rpc_finish_auth3_bind_send(req, state, &hdr,
						    &reply_pdu);
		prs_mem_free(&reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_nterror(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		/* Need to send alter context request and reply. */
		status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
							     &reply_pdu);
		prs_mem_free(&reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_nterror(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_KRB5:
		/* */
		/* Not implemented - deliberate fallthrough to the error
		 * path below. */

	default:
		DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
			 (unsigned int)state->cli->auth->auth_type));
		prs_mem_free(&reply_pdu);
		async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
	}
}
2678
2679 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2680                                            struct rpc_pipe_bind_state *state,
2681                                            struct rpc_hdr_info *phdr,
2682                                            prs_struct *reply_pdu)
2683 {
2684         DATA_BLOB server_response = data_blob_null;
2685         DATA_BLOB client_reply = data_blob_null;
2686         struct rpc_hdr_auth_info hdr_auth;
2687         struct tevent_req *subreq;
2688         NTSTATUS status;
2689
2690         if ((phdr->auth_len == 0)
2691             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2692                 return NT_STATUS_INVALID_PARAMETER;
2693         }
2694
2695         if (!prs_set_offset(
2696                     reply_pdu,
2697                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2698                 return NT_STATUS_INVALID_PARAMETER;
2699         }
2700
2701         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2702                 return NT_STATUS_INVALID_PARAMETER;
2703         }
2704
2705         /* TODO - check auth_type/auth_level match. */
2706
2707         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2708         prs_copy_data_out((char *)server_response.data, reply_pdu,
2709                           phdr->auth_len);
2710
2711         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2712                                 server_response, &client_reply);
2713
2714         if (!NT_STATUS_IS_OK(status)) {
2715                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2716                           "blob failed: %s.\n", nt_errstr(status)));
2717                 return status;
2718         }
2719
2720         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2721
2722         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2723                                        state->cli->auth->auth_type,
2724                                        state->cli->auth->auth_level,
2725                                        &client_reply, &state->rpc_out);
2726         data_blob_free(&client_reply);
2727
2728         if (!NT_STATUS_IS_OK(status)) {
2729                 return status;
2730         }
2731
2732         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2733                                 (uint8_t *)prs_data_p(&state->rpc_out),
2734                                 prs_offset(&state->rpc_out));
2735         if (subreq == NULL) {
2736                 return NT_STATUS_NO_MEMORY;
2737         }
2738         tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2739         return NT_STATUS_OK;
2740 }
2741
2742 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2743 {
2744         struct async_req *req = tevent_req_callback_data(
2745                 subreq, struct async_req);
2746         NTSTATUS status;
2747
2748         status = rpc_write_recv(subreq);
2749         TALLOC_FREE(subreq);
2750         if (!NT_STATUS_IS_OK(status)) {
2751                 async_req_nterror(req, status);
2752                 return;
2753         }
2754         async_req_done(req);
2755 }
2756
/*
 * SPNEGO-NTLMSSP continuation: unwrap the SPNEGO challenge from the
 * BINDACK's auth footer, feed the inner NTLMSSP token through
 * ntlmssp_update(), SPNEGO-wrap the reply and send it in an alter
 * context request. rpc_bind_ntlmssp_api_done() handles the response.
 */
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu)
{
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB server_ntlm_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	RPC_HDR_AUTH hdr_auth;
	struct tevent_req *subreq;
	NTSTATUS status;

	/* We must have an auth footer to respond to. */
	if ((phdr->auth_len == 0)
	    || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Process the returned NTLMSSP blob first. */
	if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	server_spnego_response = data_blob(NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_spnego_response.data,
			  reply_pdu, phdr->auth_len);

	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
	if (!spnego_parse_challenge(server_spnego_response,
				    &server_ntlm_response, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&server_ntlm_response);
		data_blob_free(&tmp_blob);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* We're finished with the server spnego response and the tmp_blob. */
	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
				server_ntlm_response, &client_reply);

	/* Finished with the server_ntlm response */
	data_blob_free(&server_ntlm_response);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
			  "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}

	/* SPNEGO wrap the client reply. */
	tmp_blob = spnego_gen_auth(client_reply);
	data_blob_free(&client_reply);
	client_reply = tmp_blob;
	tmp_blob = data_blob_null;

	/* Now prepare the alter context pdu. */
	prs_init_empty(&state->rpc_out, state, MARSHALL);

	status = create_rpc_alter_context(state->rpc_call_id,
					  &state->cli->abstract_syntax,
					  &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	subreq = rpc_api_pipe_send(state, state->ev, state->cli,
				   &state->rpc_out, RPC_ALTCONTRESP);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, rpc_bind_ntlmssp_api_done, req);
	return NT_STATUS_OK;
}
2848
/*
 * Final leg of the SPNEGO-NTLMSSP bind: validate the server's SPNEGO
 * auth response carried in the alter context reply and complete the
 * bind request.
 */
static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq)
{
	struct async_req *req = tevent_req_callback_data(
		subreq, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_auth_info hdr_auth;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	/* Get the auth blob from the reply. */
	if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
			  "unmarshall RPC_HDR.\n"));
		async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Seek to the auth header at the end of the PDU. */
	if (!prs_set_offset(
		    &reply_pdu,
		    hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	server_spnego_response = data_blob(NULL, hdr.auth_len);
	prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
			  hdr.auth_len);

	/* Check we got a valid auth response. */
	if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
					OID_NTLMSSP, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&tmp_blob);
		async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
		 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
	async_req_done(req);
}
2909
/* Collect the result of an async rpc_pipe_bind_send() request. */
NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
{
	return async_req_simple_recv_ntstatus(req);
}
2914
2915 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2916                        struct cli_pipe_auth_data *auth)
2917 {
2918         TALLOC_CTX *frame = talloc_stackframe();
2919         struct event_context *ev;
2920         struct async_req *req;
2921         NTSTATUS status = NT_STATUS_NO_MEMORY;
2922
2923         ev = event_context_init(frame);
2924         if (ev == NULL) {
2925                 goto fail;
2926         }
2927
2928         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2929         if (req == NULL) {
2930                 goto fail;
2931         }
2932
2933         while (req->state < ASYNC_REQ_DONE) {
2934                 event_loop_once(ev);
2935         }
2936
2937         status = rpc_pipe_bind_recv(req);
2938  fail:
2939         TALLOC_FREE(frame);
2940         return status;
2941 }
2942
2943 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2944                                 unsigned int timeout)
2945 {
2946         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2947
2948         if (cli == NULL) {
2949                 return 0;
2950         }
2951         return cli_set_timeout(cli, timeout);
2952 }
2953
2954 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2955 {
2956         struct cli_state *cli;
2957
2958         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2959             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2960                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2961                 return true;
2962         }
2963
2964         cli = rpc_pipe_np_smb_conn(rpc_cli);
2965         if (cli == NULL) {
2966                 return false;
2967         }
2968         E_md4hash(cli->password ? cli->password : "", nt_hash);
2969         return true;
2970 }
2971
2972 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2973                                struct cli_pipe_auth_data **presult)
2974 {
2975         struct cli_pipe_auth_data *result;
2976
2977         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2978         if (result == NULL) {
2979                 return NT_STATUS_NO_MEMORY;
2980         }
2981
2982         result->auth_type = PIPE_AUTH_TYPE_NONE;
2983         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2984
2985         result->user_name = talloc_strdup(result, "");
2986         result->domain = talloc_strdup(result, "");
2987         if ((result->user_name == NULL) || (result->domain == NULL)) {
2988                 TALLOC_FREE(result);
2989                 return NT_STATUS_NO_MEMORY;
2990         }
2991
2992         *presult = result;
2993         return NT_STATUS_OK;
2994 }
2995
/* Talloc destructor: shut down the NTLMSSP state held in the auth data. */
static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
{
	ntlmssp_end(&auth->a_u.ntlmssp_state);
	return 0;
}
3001
3002 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3003                                   enum pipe_auth_type auth_type,
3004                                   enum pipe_auth_level auth_level,
3005                                   const char *domain,
3006                                   const char *username,
3007                                   const char *password,
3008                                   struct cli_pipe_auth_data **presult)
3009 {
3010         struct cli_pipe_auth_data *result;
3011         NTSTATUS status;
3012
3013         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3014         if (result == NULL) {
3015                 return NT_STATUS_NO_MEMORY;
3016         }
3017
3018         result->auth_type = auth_type;
3019         result->auth_level = auth_level;
3020
3021         result->user_name = talloc_strdup(result, username);
3022         result->domain = talloc_strdup(result, domain);
3023         if ((result->user_name == NULL) || (result->domain == NULL)) {
3024                 status = NT_STATUS_NO_MEMORY;
3025                 goto fail;
3026         }
3027
3028         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3029         if (!NT_STATUS_IS_OK(status)) {
3030                 goto fail;
3031         }
3032
3033         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3034
3035         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3036         if (!NT_STATUS_IS_OK(status)) {
3037                 goto fail;
3038         }
3039
3040         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3041         if (!NT_STATUS_IS_OK(status)) {
3042                 goto fail;
3043         }
3044
3045         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3046         if (!NT_STATUS_IS_OK(status)) {
3047                 goto fail;
3048         }
3049
3050         /*
3051          * Turn off sign+seal to allow selected auth level to turn it back on.
3052          */
3053         result->a_u.ntlmssp_state->neg_flags &=
3054                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3055
3056         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3057                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3058         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3059                 result->a_u.ntlmssp_state->neg_flags
3060                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3061         }
3062
3063         *presult = result;
3064         return NT_STATUS_OK;
3065
3066  fail:
3067         TALLOC_FREE(result);
3068         return status;
3069 }
3070
3071 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3072                                    enum pipe_auth_level auth_level,
3073                                    const uint8_t sess_key[16],
3074                                    struct cli_pipe_auth_data **presult)
3075 {
3076         struct cli_pipe_auth_data *result;
3077
3078         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3079         if (result == NULL) {
3080                 return NT_STATUS_NO_MEMORY;
3081         }
3082
3083         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3084         result->auth_level = auth_level;
3085
3086         result->user_name = talloc_strdup(result, "");
3087         result->domain = talloc_strdup(result, domain);
3088         if ((result->user_name == NULL) || (result->domain == NULL)) {
3089                 goto fail;
3090         }
3091
3092         result->a_u.schannel_auth = talloc(result,
3093                                            struct schannel_auth_struct);
3094         if (result->a_u.schannel_auth == NULL) {
3095                 goto fail;
3096         }
3097
3098         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3099                sizeof(result->a_u.schannel_auth->sess_key));
3100         result->a_u.schannel_auth->seq_num = 0;
3101
3102         *presult = result;
3103         return NT_STATUS_OK;
3104
3105  fail:
3106         TALLOC_FREE(result);
3107         return NT_STATUS_NO_MEMORY;
3108 }
3109
#ifdef HAVE_KRB5
/* Talloc destructor: release the kerberos session key blob when the
 * kerberos auth struct goes away. Returns 0 so talloc frees the struct. */
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
{
        data_blob_free(&auth->session_key);
        return 0;
}
#endif
3117
3118 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3119                                    enum pipe_auth_level auth_level,
3120                                    const char *service_princ,
3121                                    const char *username,
3122                                    const char *password,
3123                                    struct cli_pipe_auth_data **presult)
3124 {
3125 #ifdef HAVE_KRB5
3126         struct cli_pipe_auth_data *result;
3127
3128         if ((username != NULL) && (password != NULL)) {
3129                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3130                 if (ret != 0) {
3131                         return NT_STATUS_ACCESS_DENIED;
3132                 }
3133         }
3134
3135         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3136         if (result == NULL) {
3137                 return NT_STATUS_NO_MEMORY;
3138         }
3139
3140         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3141         result->auth_level = auth_level;
3142
3143         /*
3144          * Username / domain need fixing!
3145          */
3146         result->user_name = talloc_strdup(result, "");
3147         result->domain = talloc_strdup(result, "");
3148         if ((result->user_name == NULL) || (result->domain == NULL)) {
3149                 goto fail;
3150         }
3151
3152         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3153                 result, struct kerberos_auth_struct);
3154         if (result->a_u.kerberos_auth == NULL) {
3155                 goto fail;
3156         }
3157         talloc_set_destructor(result->a_u.kerberos_auth,
3158                               cli_auth_kerberos_data_destructor);
3159
3160         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3161                 result, service_princ);
3162         if (result->a_u.kerberos_auth->service_principal == NULL) {
3163                 goto fail;
3164         }
3165
3166         *presult = result;
3167         return NT_STATUS_OK;
3168
3169  fail:
3170         TALLOC_FREE(result);
3171         return NT_STATUS_NO_MEMORY;
3172 #else
3173         return NT_STATUS_NOT_SUPPORTED;
3174 #endif
3175 }
3176
3177 /**
3178  * Create an rpc pipe client struct, connecting to a tcp port.
3179  */
3180 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3181                                        uint16_t port,
3182                                        const struct ndr_syntax_id *abstract_syntax,
3183                                        struct rpc_pipe_client **presult)
3184 {
3185         struct rpc_pipe_client *result;
3186         struct sockaddr_storage addr;
3187         NTSTATUS status;
3188         int fd;
3189
3190         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3191         if (result == NULL) {
3192                 return NT_STATUS_NO_MEMORY;
3193         }
3194
3195         result->abstract_syntax = *abstract_syntax;
3196         result->transfer_syntax = ndr_transfer_syntax;
3197         result->dispatch = cli_do_rpc_ndr;
3198
3199         result->desthost = talloc_strdup(result, host);
3200         result->srv_name_slash = talloc_asprintf_strupper_m(
3201                 result, "\\\\%s", result->desthost);
3202         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3203                 status = NT_STATUS_NO_MEMORY;
3204                 goto fail;
3205         }
3206
3207         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3208         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3209
3210         if (!resolve_name(host, &addr, 0)) {
3211                 status = NT_STATUS_NOT_FOUND;
3212                 goto fail;
3213         }
3214
3215         status = open_socket_out(&addr, port, 60, &fd);
3216         if (!NT_STATUS_IS_OK(status)) {
3217                 goto fail;
3218         }
3219         set_socket_options(fd, lp_socket_options());
3220
3221         status = rpc_transport_sock_init(result, fd, &result->transport);
3222         if (!NT_STATUS_IS_OK(status)) {
3223                 close(fd);
3224                 goto fail;
3225         }
3226
3227         *presult = result;
3228         return NT_STATUS_OK;
3229
3230  fail:
3231         TALLOC_FREE(result);
3232         return status;
3233 }
3234
3235 /**
3236  * Determine the tcp port on which a dcerpc interface is listening
3237  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3238  * target host.
3239  */
3240 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3241                                       const struct ndr_syntax_id *abstract_syntax,
3242                                       uint16_t *pport)
3243 {
3244         NTSTATUS status;
3245         struct rpc_pipe_client *epm_pipe = NULL;
3246         struct cli_pipe_auth_data *auth = NULL;
3247         struct dcerpc_binding *map_binding = NULL;
3248         struct dcerpc_binding *res_binding = NULL;
3249         struct epm_twr_t *map_tower = NULL;
3250         struct epm_twr_t *res_towers = NULL;
3251         struct policy_handle *entry_handle = NULL;
3252         uint32_t num_towers = 0;
3253         uint32_t max_towers = 1;
3254         struct epm_twr_p_t towers;
3255         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3256
3257         if (pport == NULL) {
3258                 status = NT_STATUS_INVALID_PARAMETER;
3259                 goto done;
3260         }
3261
3262         /* open the connection to the endpoint mapper */
3263         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3264                                         &ndr_table_epmapper.syntax_id,
3265                                         &epm_pipe);
3266
3267         if (!NT_STATUS_IS_OK(status)) {
3268                 goto done;
3269         }
3270
3271         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3272         if (!NT_STATUS_IS_OK(status)) {
3273                 goto done;
3274         }
3275
3276         status = rpc_pipe_bind(epm_pipe, auth);
3277         if (!NT_STATUS_IS_OK(status)) {
3278                 goto done;
3279         }
3280
3281         /* create tower for asking the epmapper */
3282
3283         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3284         if (map_binding == NULL) {
3285                 status = NT_STATUS_NO_MEMORY;
3286                 goto done;
3287         }
3288
3289         map_binding->transport = NCACN_IP_TCP;
3290         map_binding->object = *abstract_syntax;
3291         map_binding->host = host; /* needed? */
3292         map_binding->endpoint = "0"; /* correct? needed? */
3293
3294         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3295         if (map_tower == NULL) {
3296                 status = NT_STATUS_NO_MEMORY;
3297                 goto done;
3298         }
3299
3300         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3301                                             &(map_tower->tower));
3302         if (!NT_STATUS_IS_OK(status)) {
3303                 goto done;
3304         }
3305
3306         /* allocate further parameters for the epm_Map call */
3307
3308         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3309         if (res_towers == NULL) {
3310                 status = NT_STATUS_NO_MEMORY;
3311                 goto done;
3312         }
3313         towers.twr = res_towers;
3314
3315         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3316         if (entry_handle == NULL) {
3317                 status = NT_STATUS_NO_MEMORY;
3318                 goto done;
3319         }
3320
3321         /* ask the endpoint mapper for the port */
3322
3323         status = rpccli_epm_Map(epm_pipe,
3324                                 tmp_ctx,
3325                                 CONST_DISCARD(struct GUID *,
3326                                               &(abstract_syntax->uuid)),
3327                                 map_tower,
3328                                 entry_handle,
3329                                 max_towers,
3330                                 &num_towers,
3331                                 &towers);
3332
3333         if (!NT_STATUS_IS_OK(status)) {
3334                 goto done;
3335         }
3336
3337         if (num_towers != 1) {
3338                 status = NT_STATUS_UNSUCCESSFUL;
3339                 goto done;
3340         }
3341
3342         /* extract the port from the answer */
3343
3344         status = dcerpc_binding_from_tower(tmp_ctx,
3345                                            &(towers.twr->tower),
3346                                            &res_binding);
3347         if (!NT_STATUS_IS_OK(status)) {
3348                 goto done;
3349         }
3350
3351         /* are further checks here necessary? */
3352         if (res_binding->transport != NCACN_IP_TCP) {
3353                 status = NT_STATUS_UNSUCCESSFUL;
3354                 goto done;
3355         }
3356
3357         *pport = (uint16_t)atoi(res_binding->endpoint);
3358
3359 done:
3360         TALLOC_FREE(tmp_ctx);
3361         return status;
3362 }
3363
3364 /**
3365  * Create a rpc pipe client struct, connecting to a host via tcp.
3366  * The port is determined by asking the endpoint mapper on the given
3367  * host.
3368  */
3369 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3370                            const struct ndr_syntax_id *abstract_syntax,
3371                            struct rpc_pipe_client **presult)
3372 {
3373         NTSTATUS status;
3374         uint16_t port = 0;
3375
3376         *presult = NULL;
3377
3378         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3379         if (!NT_STATUS_IS_OK(status)) {
3380                 goto done;
3381         }
3382
3383         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3384                                         abstract_syntax, presult);
3385
3386 done:
3387         return status;
3388 }
3389
3390 /********************************************************************
3391  Create a rpc pipe client struct, connecting to a unix domain socket
3392  ********************************************************************/
3393 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3394                                const struct ndr_syntax_id *abstract_syntax,
3395                                struct rpc_pipe_client **presult)
3396 {
3397         struct rpc_pipe_client *result;
3398         struct sockaddr_un addr;
3399         NTSTATUS status;
3400         int fd;
3401
3402         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3403         if (result == NULL) {
3404                 return NT_STATUS_NO_MEMORY;
3405         }
3406
3407         result->abstract_syntax = *abstract_syntax;
3408         result->transfer_syntax = ndr_transfer_syntax;
3409         result->dispatch = cli_do_rpc_ndr;
3410
3411         result->desthost = get_myname(result);
3412         result->srv_name_slash = talloc_asprintf_strupper_m(
3413                 result, "\\\\%s", result->desthost);
3414         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3415                 status = NT_STATUS_NO_MEMORY;
3416                 goto fail;
3417         }
3418
3419         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3420         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3421
3422         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3423         if (fd == -1) {
3424                 status = map_nt_error_from_unix(errno);
3425                 goto fail;
3426         }
3427
3428         ZERO_STRUCT(addr);
3429         addr.sun_family = AF_UNIX;
3430         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3431
3432         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3433                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3434                           strerror(errno)));
3435                 close(fd);
3436                 return map_nt_error_from_unix(errno);
3437         }
3438
3439         status = rpc_transport_sock_init(result, fd, &result->transport);
3440         if (!NT_STATUS_IS_OK(status)) {
3441                 close(fd);
3442                 goto fail;
3443         }
3444
3445         *presult = result;
3446         return NT_STATUS_OK;
3447
3448  fail:
3449         TALLOC_FREE(result);
3450         return status;
3451 }
3452
/* Talloc destructor for named-pipe rpc clients: unlink the pipe from
 * the owning SMB connection's pipe_list (if the connection is still
 * around). Returns 0 so talloc proceeds with the free. */
static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
{
        struct cli_state *cli;

        cli = rpc_pipe_np_smb_conn(p);
        if (cli != NULL) {
                DLIST_REMOVE(cli->pipe_list, p);
        }
        return 0;
}
3463
3464 /****************************************************************************
3465  Open a named pipe over SMB to a remote server.
3466  *
3467  * CAVEAT CALLER OF THIS FUNCTION:
3468  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3469  *    so be sure that this function is called AFTER any structure (vs pointer)
3470  *    assignment of the cli.  In particular, libsmbclient does structure
3471  *    assignments of cli, which invalidates the data in the returned
3472  *    rpc_pipe_client if this function is called before the structure assignment
3473  *    of cli.
3474  * 
3475  ****************************************************************************/
3476
3477 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3478                                  const struct ndr_syntax_id *abstract_syntax,
3479                                  struct rpc_pipe_client **presult)
3480 {
3481         struct rpc_pipe_client *result;
3482         NTSTATUS status;
3483
3484         /* sanity check to protect against crashes */
3485
3486         if ( !cli ) {
3487                 return NT_STATUS_INVALID_HANDLE;
3488         }
3489
3490         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3491         if (result == NULL) {
3492                 return NT_STATUS_NO_MEMORY;
3493         }
3494
3495         result->abstract_syntax = *abstract_syntax;
3496         result->transfer_syntax = ndr_transfer_syntax;
3497         result->dispatch = cli_do_rpc_ndr;
3498         result->desthost = talloc_strdup(result, cli->desthost);
3499         result->srv_name_slash = talloc_asprintf_strupper_m(
3500                 result, "\\\\%s", result->desthost);
3501
3502         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3503         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3504
3505         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3506                 TALLOC_FREE(result);
3507                 return NT_STATUS_NO_MEMORY;
3508         }
3509
3510         status = rpc_transport_np_init(result, cli, abstract_syntax,
3511                                        &result->transport);
3512         if (!NT_STATUS_IS_OK(status)) {
3513                 TALLOC_FREE(result);
3514                 return status;
3515         }
3516
3517         DLIST_ADD(cli->pipe_list, result);
3518         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3519
3520         *presult = result;
3521         return NT_STATUS_OK;
3522 }
3523
3524 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3525                              struct rpc_cli_smbd_conn *conn,
3526                              const struct ndr_syntax_id *syntax,
3527                              struct rpc_pipe_client **presult)
3528 {
3529         struct rpc_pipe_client *result;
3530         struct cli_pipe_auth_data *auth;
3531         NTSTATUS status;
3532
3533         result = talloc(mem_ctx, struct rpc_pipe_client);
3534         if (result == NULL) {
3535                 return NT_STATUS_NO_MEMORY;
3536         }
3537         result->abstract_syntax = *syntax;
3538         result->transfer_syntax = ndr_transfer_syntax;
3539         result->dispatch = cli_do_rpc_ndr;
3540         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3541         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3542
3543         result->desthost = talloc_strdup(result, global_myname());
3544         result->srv_name_slash = talloc_asprintf_strupper_m(
3545                 result, "\\\\%s", global_myname());
3546         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3547                 TALLOC_FREE(result);
3548                 return NT_STATUS_NO_MEMORY;
3549         }
3550
3551         status = rpc_transport_smbd_init(result, conn, syntax,
3552                                          &result->transport);
3553         if (!NT_STATUS_IS_OK(status)) {
3554                 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3555                           nt_errstr(status)));
3556                 TALLOC_FREE(result);
3557                 return status;
3558         }
3559
3560         status = rpccli_anon_bind_data(result, &auth);
3561         if (!NT_STATUS_IS_OK(status)) {
3562                 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3563                           nt_errstr(status)));
3564                 TALLOC_FREE(result);
3565                 return status;
3566         }
3567
3568         status = rpc_pipe_bind(result, auth);
3569         if (!NT_STATUS_IS_OK(status)) {
3570                 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3571                 TALLOC_FREE(result);
3572                 return status;
3573         }
3574
3575         *presult = result;
3576         return NT_STATUS_OK;
3577 }
3578
3579 /****************************************************************************
3580  Open a pipe to a remote server.
3581  ****************************************************************************/
3582
3583 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3584                                   const struct ndr_syntax_id *interface,
3585                                   struct rpc_pipe_client **presult)
3586 {
3587         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3588                 /*
3589                  * We should have a better way to figure out this drsuapi
3590                  * speciality...
3591                  */
3592                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3593                                          presult);
3594         }
3595
3596         return rpc_pipe_open_np(cli, interface, presult);
3597 }
3598
3599 /****************************************************************************
3600  Open a named pipe to an SMB server and bind anonymously.
3601  ****************************************************************************/
3602
3603 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3604                                   const struct ndr_syntax_id *interface,
3605                                   struct rpc_pipe_client **presult)
3606 {
3607         struct rpc_pipe_client *result;
3608         struct cli_pipe_auth_data *auth;
3609         NTSTATUS status;
3610
3611         status = cli_rpc_pipe_open(cli, interface, &result);
3612         if (!NT_STATUS_IS_OK(status)) {
3613                 return status;
3614         }
3615
3616         status = rpccli_anon_bind_data(result, &auth);
3617         if (!NT_STATUS_IS_OK(status)) {
3618                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3619                           nt_errstr(status)));
3620                 TALLOC_FREE(result);
3621                 return status;
3622         }
3623
3624         /*
3625          * This is a bit of an abstraction violation due to the fact that an
3626          * anonymous bind on an authenticated SMB inherits the user/domain
3627          * from the enclosing SMB creds
3628          */
3629
3630         TALLOC_FREE(auth->user_name);
3631         TALLOC_FREE(auth->domain);
3632
3633         auth->user_name = talloc_strdup(auth, cli->user_name);
3634         auth->domain = talloc_strdup(auth, cli->domain);
3635         auth->user_session_key = data_blob_talloc(auth,
3636                 cli->user_session_key.data,
3637                 cli->user_session_key.length);
3638
3639         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3640                 TALLOC_FREE(result);
3641                 return NT_STATUS_NO_MEMORY;
3642         }
3643
3644         status = rpc_pipe_bind(result, auth);
3645         if (!NT_STATUS_IS_OK(status)) {
3646                 int lvl = 0;
3647                 if (ndr_syntax_id_equal(interface,
3648                                         &ndr_table_dssetup.syntax_id)) {
3649                         /* non AD domains just don't have this pipe, avoid
3650                          * level 0 statement in that case - gd */
3651                         lvl = 3;
3652                 }
3653                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3654                             "%s failed with error %s\n",
3655                             get_pipe_name_from_iface(interface),
3656                             nt_errstr(status) ));
3657                 TALLOC_FREE(result);
3658                 return status;
3659         }
3660
3661         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3662                   "%s and bound anonymously.\n",
3663                   get_pipe_name_from_iface(interface), cli->desthost));
3664
3665         *presult = result;
3666         return NT_STATUS_OK;
3667 }
3668
3669 /****************************************************************************
3670  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3671  ****************************************************************************/
3672
3673 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3674                                                    const struct ndr_syntax_id *interface,
3675