1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
54 static const struct pipe_id_info {
55         /* the names appear not to matter: the syntaxes _do_ matter */
56
57         const char *client_pipe;
58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
60 {
61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
68         { PIPE_SPOOLSS,         &syntax_spoolss },
69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
77         { NULL, NULL }
78 };
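/*
 * Note: lookups in this table are done on the abstract syntax id (see
 * get_pipe_name_from_iface() below); the trailing NULL entry terminates
 * the search.
 */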
79
80 /****************************************************************************
81  Return the pipe name (without the \PIPE prefix) from the interface's syntax id.
82  ****************************************************************************/
83
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
85 {
86         char *guid_str;
87         const char *result;
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
102         if (guid_str == NULL) {
103                 return NULL;
104         }
105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106                                  (int)interface->if_version);
107         TALLOC_FREE(guid_str);
108
109         if (result == NULL) {
110                 return "PIPE";
111         }
112         return result;
113 }
114
115 /********************************************************************
116  Map internal value to wire value.
117  ********************************************************************/
118
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
120 {
121         switch (auth_type) {
122
123         case PIPE_AUTH_TYPE_NONE:
124                 return RPC_ANONYMOUS_AUTH_TYPE;
125
126         case PIPE_AUTH_TYPE_NTLMSSP:
127                 return RPC_NTLMSSP_AUTH_TYPE;
128
129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131                 return RPC_SPNEGO_AUTH_TYPE;
132
133         case PIPE_AUTH_TYPE_SCHANNEL:
134                 return RPC_SCHANNEL_AUTH_TYPE;
135
136         case PIPE_AUTH_TYPE_KRB5:
137                 return RPC_KRB5_AUTH_TYPE;
138
139         default:
140                 DEBUG(0,("map_pipe_auth_type_to_rpc_auth_type: unknown pipe "
141                         "auth type %u\n",
142                         (unsigned int)auth_type ));
143                 break;
144         }
145         return -1;
146 }
147
148 /********************************************************************
149  Pipe description for a DEBUG
150  ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152                                    struct rpc_pipe_client *cli)
153 {
154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155         if (result == NULL) {
156                 return "pipe";
157         }
158         return result;
159 }
160
161 /********************************************************************
162  Rpc pipe call id. A simple counter, incremented for every new request.
163  ********************************************************************/
164
165 static uint32 get_rpc_call_id(void)
166 {
167         static uint32 call_id = 0;
168         return ++call_id;
169 }
170
171 /*
172  * Realloc pdu to have at least "size" bytes
173  */
174
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
176 {
177         size_t extra_size;
178
179         if (prs_data_size(pdu) >= size) {
180                 return true;
181         }
182
183         extra_size = size - prs_data_size(pdu);
184
185         if (!prs_force_grow(pdu, extra_size)) {
186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187                           "%d bytes.\n", (int)extra_size));
188                 return false;
189         }
190
191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192                   (int)extra_size, prs_data_size(pdu)));
193         return true;
194 }
195
196
197 /*******************************************************************
198  Use the transport's read calls to get the rest of one fragment's worth of rpc
199  data. Reads the whole size or returns an error.
200  ********************************************************************/
201
202 struct rpc_read_state {
203         struct event_context *ev;
204         struct rpc_cli_transport *transport;
205         uint8_t *data;
206         size_t size;
207         size_t num_read;
208 };
209
210 static void rpc_read_done(struct async_req *subreq);
211
212 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213                                        struct event_context *ev,
214                                        struct rpc_cli_transport *transport,
215                                        uint8_t *data, size_t size)
216 {
217         struct async_req *result, *subreq;
218         struct rpc_read_state *state;
219
220         if (!async_req_setup(mem_ctx, &result, &state,
221                              struct rpc_read_state)) {
222                 return NULL;
223         }
224         state->ev = ev;
225         state->transport = transport;
226         state->data = data;
227         state->size = size;
228         state->num_read = 0;
229
230         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
231
232         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
233                                       transport->priv);
234         if (subreq == NULL) {
235                 goto fail;
236         }
237         subreq->async.fn = rpc_read_done;
238         subreq->async.priv = result;
239         return result;
240
241  fail:
242         TALLOC_FREE(result);
243         return NULL;
244 }
245
246 static void rpc_read_done(struct async_req *subreq)
247 {
248         struct async_req *req = talloc_get_type_abort(
249                 subreq->async.priv, struct async_req);
250         struct rpc_read_state *state = talloc_get_type_abort(
251                 req->private_data, struct rpc_read_state);
252         NTSTATUS status;
253         ssize_t received;
254
255         status = state->transport->read_recv(subreq, &received);
256         TALLOC_FREE(subreq);
257         if (!NT_STATUS_IS_OK(status)) {
258                 async_req_nterror(req, status);
259                 return;
260         }
261
262         state->num_read += received;
263         if (state->num_read == state->size) {
264                 async_req_done(req);
265                 return;
266         }
267
268         subreq = state->transport->read_send(state, state->ev,
269                                              state->data + state->num_read,
270                                              state->size - state->num_read,
271                                              state->transport->priv);
272         if (async_req_nomem(subreq, req)) {
273                 return;
274         }
275         subreq->async.fn = rpc_read_done;
276         subreq->async.priv = req;
277 }
278
279 static NTSTATUS rpc_read_recv(struct async_req *req)
280 {
281         return async_req_simple_recv_ntstatus(req);
282 }
283
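/*******************************************************************
 Write a buffer out via the transport, looping until either the whole
 size has been written or an error is returned. Mirrors rpc_read_send().
 ********************************************************************/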
284 struct rpc_write_state {
285         struct event_context *ev;
286         struct rpc_cli_transport *transport;
287         const uint8_t *data;
288         size_t size;
289         size_t num_written;
290 };
291
292 static void rpc_write_done(struct async_req *subreq);
293
294 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
295                                         struct event_context *ev,
296                                         struct rpc_cli_transport *transport,
297                                         const uint8_t *data, size_t size)
298 {
299         struct async_req *result, *subreq;
300         struct rpc_write_state *state;
301
302         if (!async_req_setup(mem_ctx, &result, &state,
303                              struct rpc_write_state)) {
304                 return NULL;
305         }
306         state->ev = ev;
307         state->transport = transport;
308         state->data = data;
309         state->size = size;
310         state->num_written = 0;
311
312         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
313
314         subreq = transport->write_send(state, ev, data, size, transport->priv);
315         if (subreq == NULL) {
316                 goto fail;
317         }
318         subreq->async.fn = rpc_write_done;
319         subreq->async.priv = result;
320         return result;
321  fail:
322         TALLOC_FREE(result);
323         return NULL;
324 }
325
326 static void rpc_write_done(struct async_req *subreq)
327 {
328         struct async_req *req = talloc_get_type_abort(
329                 subreq->async.priv, struct async_req);
330         struct rpc_write_state *state = talloc_get_type_abort(
331                 req->private_data, struct rpc_write_state);
332         NTSTATUS status;
333         ssize_t written;
334
335         status = state->transport->write_recv(subreq, &written);
336         TALLOC_FREE(subreq);
337         if (!NT_STATUS_IS_OK(status)) {
338                 async_req_nterror(req, status);
339                 return;
340         }
341
342         state->num_written += written;
343
344         if (state->num_written == state->size) {
345                 async_req_done(req);
346                 return;
347         }
348
349         subreq = state->transport->write_send(state, state->ev,
350                                               state->data + state->num_written,
351                                               state->size - state->num_written,
352                                               state->transport->priv);
353         if (async_req_nomem(subreq, req)) {
354                 return;
355         }
356         subreq->async.fn = rpc_write_done;
357         subreq->async.priv = req;
358 }
359
360 static NTSTATUS rpc_write_recv(struct async_req *req)
361 {
362         return async_req_simple_recv_ntstatus(req);
363 }
364
365
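/*******************************************************************
 Unmarshall and sanity-check the RPC header at the start of a pdu.
 Fragments larger than the negotiated max receive fragment are rejected.
 ********************************************************************/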
366 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
367                                  struct rpc_hdr_info *prhdr,
368                                  prs_struct *pdu)
369 {
370         /*
371          * This next call sets the endian bit correctly in current_pdu. We
372          * will propagate this to rbuf later.
373          */
374
375         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
376                 DEBUG(0, ("parse_rpc_header: Failed to unmarshall RPC_HDR.\n"));
377                 return NT_STATUS_BUFFER_TOO_SMALL;
378         }
379
380         if (prhdr->frag_len > cli->max_recv_frag) {
381                 DEBUG(0, ("parse_rpc_header: Server sent fraglen %d,"
382                           " we only allow %d\n", (int)prhdr->frag_len,
383                           (int)cli->max_recv_frag));
384                 return NT_STATUS_BUFFER_TOO_SMALL;
385         }
386
387         return NT_STATUS_OK;
388 }
389
390 /****************************************************************************
391  Try and get a PDU's worth of data from current_pdu. If not, then read more
392  from the wire.
393  ****************************************************************************/
394
395 struct get_complete_frag_state {
396         struct event_context *ev;
397         struct rpc_pipe_client *cli;
398         struct rpc_hdr_info *prhdr;
399         prs_struct *pdu;
400 };
401
402 static void get_complete_frag_got_header(struct async_req *subreq);
403 static void get_complete_frag_got_rest(struct async_req *subreq);
404
405 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
406                                                struct event_context *ev,
407                                                struct rpc_pipe_client *cli,
408                                                struct rpc_hdr_info *prhdr,
409                                                prs_struct *pdu)
410 {
411         struct async_req *result, *subreq;
412         struct get_complete_frag_state *state;
413         uint32_t pdu_len;
414         NTSTATUS status;
415
416         if (!async_req_setup(mem_ctx, &result, &state,
417                              struct get_complete_frag_state)) {
418                 return NULL;
419         }
420         state->ev = ev;
421         state->cli = cli;
422         state->prhdr = prhdr;
423         state->pdu = pdu;
424
425         pdu_len = prs_data_size(pdu);
426         if (pdu_len < RPC_HEADER_LEN) {
427                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
428                         status = NT_STATUS_NO_MEMORY;
429                         goto post_status;
430                 }
431                 subreq = rpc_read_send(
432                         state, state->ev,
433                         state->cli->transport,
434                         (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
435                         RPC_HEADER_LEN - pdu_len);
436                 if (subreq == NULL) {
437                         status = NT_STATUS_NO_MEMORY;
438                         goto post_status;
439                 }
440                 subreq->async.fn = get_complete_frag_got_header;
441                 subreq->async.priv = result;
442                 return result;
443         }
444
445         status = parse_rpc_header(cli, prhdr, pdu);
446         if (!NT_STATUS_IS_OK(status)) {
447                 goto post_status;
448         }
449
450         /*
451          * Ensure we have frag_len bytes of data.
452          */
453         if (pdu_len < prhdr->frag_len) {
454                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
455                         status = NT_STATUS_NO_MEMORY;
456                         goto post_status;
457                 }
458                 subreq = rpc_read_send(state, state->ev,
459                                        state->cli->transport,
460                                        (uint8_t *)(prs_data_p(pdu) + pdu_len),
461                                        prhdr->frag_len - pdu_len);
462                 if (subreq == NULL) {
463                         status = NT_STATUS_NO_MEMORY;
464                         goto post_status;
465                 }
466                 subreq->async.fn = get_complete_frag_got_rest;
467                 subreq->async.priv = result;
468                 return result;
469         }
470
471         status = NT_STATUS_OK;
472  post_status:
473         if (async_post_ntstatus(result, ev, status)) {
474                 return result;
475         }
476         TALLOC_FREE(result);
477         return NULL;
478 }
479
480 static void get_complete_frag_got_header(struct async_req *subreq)
481 {
482         struct async_req *req = talloc_get_type_abort(
483                 subreq->async.priv, struct async_req);
484         struct get_complete_frag_state *state = talloc_get_type_abort(
485                 req->private_data, struct get_complete_frag_state);
486         NTSTATUS status;
487
488         status = rpc_read_recv(subreq);
489         TALLOC_FREE(subreq);
490         if (!NT_STATUS_IS_OK(status)) {
491                 async_req_nterror(req, status);
492                 return;
493         }
494
495         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
496         if (!NT_STATUS_IS_OK(status)) {
497                 async_req_nterror(req, status);
498                 return;
499         }
500
501         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
502                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
503                 return;
504         }
505
506         /*
507          * We're here in this piece of code because we've read exactly
508          * RPC_HEADER_LEN bytes into state->pdu.
509          */
510
511         subreq = rpc_read_send(
512                 state, state->ev, state->cli->transport,
513                 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
514                 state->prhdr->frag_len - RPC_HEADER_LEN);
515         if (async_req_nomem(subreq, req)) {
516                 return;
517         }
518         subreq->async.fn = get_complete_frag_got_rest;
519         subreq->async.priv = req;
520 }
521
522 static void get_complete_frag_got_rest(struct async_req *subreq)
523 {
524         struct async_req *req = talloc_get_type_abort(
525                 subreq->async.priv, struct async_req);
526         NTSTATUS status;
527
528         status = rpc_read_recv(subreq);
529         TALLOC_FREE(subreq);
530         if (!NT_STATUS_IS_OK(status)) {
531                 async_req_nterror(req, status);
532                 return;
533         }
534         async_req_done(req);
535 }
536
537 static NTSTATUS get_complete_frag_recv(struct async_req *req)
538 {
539         return async_req_simple_recv_ntstatus(req);
540 }
541
542 /****************************************************************************
543  NTLMSSP specific sign/seal.
544  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
545  In fact I should probably abstract these into identical pieces of code... JRA.
546  ****************************************************************************/
547
548 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
549                                 prs_struct *current_pdu,
550                                 uint8 *p_ss_padding_len)
551 {
552         RPC_HDR_AUTH auth_info;
553         uint32 save_offset = prs_offset(current_pdu);
554         uint32 auth_len = prhdr->auth_len;
555         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
556         unsigned char *data = NULL;
557         size_t data_len;
558         unsigned char *full_packet_data = NULL;
559         size_t full_packet_data_len;
560         DATA_BLOB auth_blob;
561         NTSTATUS status;
562
563         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
564             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
565                 return NT_STATUS_OK;
566         }
567
568         if (!ntlmssp_state) {
569                 return NT_STATUS_INVALID_PARAMETER;
570         }
571
572         /* Ensure there's enough data for an authenticated response. */
573         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
574                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
575                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
576                         (unsigned int)auth_len ));
577                 return NT_STATUS_BUFFER_TOO_SMALL;
578         }
579
580         /*
581          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
582          * after the RPC header.
583          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
584          * functions as NTLMv2 checks the rpc headers also.
585          */
586
587         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
588         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
589
590         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
591         full_packet_data_len = prhdr->frag_len - auth_len;
592
593         /* Pull the auth header and the following data into a blob. */
594         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
595                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
596                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
597                 return NT_STATUS_BUFFER_TOO_SMALL;
598         }
599
600         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
601                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
602                 return NT_STATUS_BUFFER_TOO_SMALL;
603         }
604
605         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
606         auth_blob.length = auth_len;
607
608         switch (cli->auth->auth_level) {
609                 case PIPE_AUTH_LEVEL_PRIVACY:
610                         /* Data is encrypted. */
611                         status = ntlmssp_unseal_packet(ntlmssp_state,
612                                                         data, data_len,
613                                                         full_packet_data,
614                                                         full_packet_data_len,
615                                                         &auth_blob);
616                         if (!NT_STATUS_IS_OK(status)) {
617                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
618                                         "packet from %s. Error was %s.\n",
619                                         rpccli_pipe_txt(debug_ctx(), cli),
620                                         nt_errstr(status) ));
621                                 return status;
622                         }
623                         break;
624                 case PIPE_AUTH_LEVEL_INTEGRITY:
625                         /* Data is signed. */
626                         status = ntlmssp_check_packet(ntlmssp_state,
627                                                         data, data_len,
628                                                         full_packet_data,
629                                                         full_packet_data_len,
630                                                         &auth_blob);
631                         if (!NT_STATUS_IS_OK(status)) {
632                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
633                                         "packet from %s. Error was %s.\n",
634                                         rpccli_pipe_txt(debug_ctx(), cli),
635                                         nt_errstr(status) ));
636                                 return status;
637                         }
638                         break;
639                 default:
640                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
641                                   "auth level %d\n", cli->auth->auth_level));
642                         return NT_STATUS_INVALID_INFO_CLASS;
643         }
644
645         /*
646          * Return the current pointer to the data offset.
647          */
648
649         if(!prs_set_offset(current_pdu, save_offset)) {
650                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
651                         (unsigned int)save_offset ));
652                 return NT_STATUS_BUFFER_TOO_SMALL;
653         }
654
655         /*
656          * Remember the padding length. We must remove it from the real data
657          * stream once the sign/seal is done.
658          */
659
660         *p_ss_padding_len = auth_info.auth_pad_len;
661
662         return NT_STATUS_OK;
663 }
664
665 /****************************************************************************
666  schannel specific sign/seal.
667  ****************************************************************************/
668
669 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
670                                 prs_struct *current_pdu,
671                                 uint8 *p_ss_padding_len)
672 {
673         RPC_HDR_AUTH auth_info;
674         RPC_AUTH_SCHANNEL_CHK schannel_chk;
675         uint32 auth_len = prhdr->auth_len;
676         uint32 save_offset = prs_offset(current_pdu);
677         struct schannel_auth_struct *schannel_auth =
678                 cli->auth->a_u.schannel_auth;
679         uint32 data_len;
680
681         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
682             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
683                 return NT_STATUS_OK;
684         }
685
686         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
687                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
688                 return NT_STATUS_INVALID_PARAMETER;
689         }
690
691         if (!schannel_auth) {
692                 return NT_STATUS_INVALID_PARAMETER;
693         }
694
695         /* Ensure there's enough data for an authenticated response. */
696         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
697                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
698                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
699                         (unsigned int)auth_len ));
700                 return NT_STATUS_INVALID_PARAMETER;
701         }
702
703         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
704
705         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
706                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
707                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
708                 return NT_STATUS_BUFFER_TOO_SMALL;
709         }
710
711         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
712                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
713                 return NT_STATUS_BUFFER_TOO_SMALL;
714         }
715
716         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
717                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
718                         auth_info.auth_type));
719                 return NT_STATUS_BUFFER_TOO_SMALL;
720         }
721
722         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
723                                 &schannel_chk, current_pdu, 0)) {
724                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
725                 return NT_STATUS_BUFFER_TOO_SMALL;
726         }
727
728         if (!schannel_decode(schannel_auth,
729                         cli->auth->auth_level,
730                         SENDER_IS_ACCEPTOR,
731                         &schannel_chk,
732                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
733                         data_len)) {
734                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
735                                 "Connection to %s.\n",
736                                 rpccli_pipe_txt(debug_ctx(), cli)));
737                 return NT_STATUS_INVALID_PARAMETER;
738         }
739
740         /* The sequence number gets incremented on both send and receive. */
741         schannel_auth->seq_num++;
742
743         /*
744          * Return the current pointer to the data offset.
745          */
746
747         if(!prs_set_offset(current_pdu, save_offset)) {
748                 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
749                         (unsigned int)save_offset ));
750                 return NT_STATUS_BUFFER_TOO_SMALL;
751         }
752
753         /*
754          * Remember the padding length. We must remove it from the real data
755          * stream once the sign/seal is done.
756          */
757
758         *p_ss_padding_len = auth_info.auth_pad_len;
759
760         return NT_STATUS_OK;
761 }
762
763 /****************************************************************************
764  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
765  ****************************************************************************/
766
767 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
768                                 prs_struct *current_pdu,
769                                 uint8 *p_ss_padding_len)
770 {
771         NTSTATUS ret = NT_STATUS_OK;
772
773         /* Paranoia checks for auth_len. */
774         if (prhdr->auth_len) {
775                 if (prhdr->auth_len > prhdr->frag_len) {
776                         return NT_STATUS_INVALID_PARAMETER;
777                 }
778
779                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
780                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
781                         /* Integer wrap attempt. */
782                         return NT_STATUS_INVALID_PARAMETER;
783                 }
784         }
785
786         /*
787          * Now we have a complete RPC response PDU fragment, try and verify any auth data.
788          */
789
790         switch(cli->auth->auth_type) {
791                 case PIPE_AUTH_TYPE_NONE:
792                         if (prhdr->auth_len) {
793                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
794                                           "Connection to %s - got non-zero "
795                                           "auth len %u.\n",
796                                         rpccli_pipe_txt(debug_ctx(), cli),
797                                         (unsigned int)prhdr->auth_len ));
798                                 return NT_STATUS_INVALID_PARAMETER;
799                         }
800                         break;
801
802                 case PIPE_AUTH_TYPE_NTLMSSP:
803                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
804                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
805                         if (!NT_STATUS_IS_OK(ret)) {
806                                 return ret;
807                         }
808                         break;
809
810                 case PIPE_AUTH_TYPE_SCHANNEL:
811                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
812                         if (!NT_STATUS_IS_OK(ret)) {
813                                 return ret;
814                         }
815                         break;
816
817                 case PIPE_AUTH_TYPE_KRB5:
818                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
819                 default:
820                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
821                                   "to %s - unknown internal auth type %u.\n",
822                                   rpccli_pipe_txt(debug_ctx(), cli),
823                                   cli->auth->auth_type ));
824                         return NT_STATUS_INVALID_INFO_CLASS;
825         }
826
827         return NT_STATUS_OK;
828 }
829
830 /****************************************************************************
831  Do basic sanity and authentication checks on an incoming pdu and locate the NDR payload.
832  ****************************************************************************/
833
834 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
835                         prs_struct *current_pdu,
836                         uint8 expected_pkt_type,
837                         char **ppdata,
838                         uint32 *pdata_len,
839                         prs_struct *return_data)
840 {
841
842         NTSTATUS ret = NT_STATUS_OK;
843         uint32 current_pdu_len = prs_data_size(current_pdu);
844
845         if (current_pdu_len != prhdr->frag_len) {
846                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
847                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
848                 return NT_STATUS_INVALID_PARAMETER;
849         }
850
851         /*
852          * Point the return values at the real data including the RPC
853          * header. Just in case the caller wants it.
854          */
855         *ppdata = prs_data_p(current_pdu);
856         *pdata_len = current_pdu_len;
857
858         /* Ensure we have the correct type. */
859         switch (prhdr->pkt_type) {
860                 case RPC_ALTCONTRESP:
861                 case RPC_BINDACK:
862
863                         /* Alter context and bind ack share the same packet definitions. */
864                         break;
865
866
867                 case RPC_RESPONSE:
868                 {
869                         RPC_HDR_RESP rhdr_resp;
870                         uint8 ss_padding_len = 0;
871
872                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
873                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
874                                 return NT_STATUS_BUFFER_TOO_SMALL;
875                         }
876
877                         /* Here's where we deal with incoming sign/seal. */
878                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
879                                         current_pdu, &ss_padding_len);
880                         if (!NT_STATUS_IS_OK(ret)) {
881                                 return ret;
882                         }
883
884                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
885                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
886
887                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
888                                 return NT_STATUS_BUFFER_TOO_SMALL;
889                         }
890
891                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
892
893                         /* Remember to remove the auth footer. */
894                         if (prhdr->auth_len) {
895                                 /* We've already done integer wrap tests on auth_len in
896                                         cli_pipe_validate_rpc_response(). */
897                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
898                                         return NT_STATUS_BUFFER_TOO_SMALL;
899                                 }
900                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
901                         }
902
903                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
904                                 current_pdu_len, *pdata_len, ss_padding_len ));
905
906                         /*
907                          * If this is the first reply, and the allocation hint is reasonable, try and
908                          * set up the return_data parse_struct to the correct size.
909                          */
910
911                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
912                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
913                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
914                                                 "too large to allocate\n",
915                                                 (unsigned int)rhdr_resp.alloc_hint ));
916                                         return NT_STATUS_NO_MEMORY;
917                                 }
918                         }
919
920                         break;
921                 }
922
923                 case RPC_BINDNACK:
924                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
925                                   "received from %s!\n",
926                                   rpccli_pipe_txt(debug_ctx(), cli)));
927                         /* Use this for now... */
928                         return NT_STATUS_NETWORK_ACCESS_DENIED;
929
930                 case RPC_FAULT:
931                 {
932                         RPC_HDR_RESP rhdr_resp;
933                         RPC_HDR_FAULT fault_resp;
934
935                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
936                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
937                                 return NT_STATUS_BUFFER_TOO_SMALL;
938                         }
939
940                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
941                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
942                                 return NT_STATUS_BUFFER_TOO_SMALL;
943                         }
944
945                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
946                                   "code %s received from %s!\n",
947                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
948                                 rpccli_pipe_txt(debug_ctx(), cli)));
949                         if (NT_STATUS_IS_OK(fault_resp.status)) {
950                                 return NT_STATUS_UNSUCCESSFUL;
951                         } else {
952                                 return fault_resp.status;
953                         }
954                 }
955
956                 default:
957                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
958                                 "from %s!\n",
959                                 (unsigned int)prhdr->pkt_type,
960                                 rpccli_pipe_txt(debug_ctx(), cli)));
961                         return NT_STATUS_INVALID_INFO_CLASS;
962         }
963
964         if (prhdr->pkt_type != expected_pkt_type) {
965                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
966                           "got an unexpected RPC packet type - %u, not %u\n",
967                         rpccli_pipe_txt(debug_ctx(), cli),
968                         prhdr->pkt_type,
969                         expected_pkt_type));
970                 return NT_STATUS_INVALID_INFO_CLASS;
971         }
972
973         /* Do this just before return - we don't want to modify any rpc header
974            data before now as we may have needed to do cryptographic actions on
975            it before. */
976
977         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
978                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
979                         "setting fragment first/last ON.\n"));
980                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
981         }
982
983         return NT_STATUS_OK;
984 }
985
986 /****************************************************************************
987  Ensure we eat the just processed pdu from the current_pdu prs_struct.
988  Normally the frag_len and buffer size will match, but on the first trans
989  reply there is a theoretical chance that buffer size > frag_len, so we must
990  deal with that.
991  ****************************************************************************/
992
993 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
994 {
995         uint32 current_pdu_len = prs_data_size(current_pdu);
996
997         if (current_pdu_len < prhdr->frag_len) {
998                 return NT_STATUS_BUFFER_TOO_SMALL;
999         }
1000
1001         /* Common case. */
1002         if (current_pdu_len == (uint32)prhdr->frag_len) {
1003                 prs_mem_free(current_pdu);
1004                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1005                 /* Make current_pdu dynamic with no memory. */
1006                 prs_give_memory(current_pdu, 0, 0, True);
1007                 return NT_STATUS_OK;
1008         }
1009
1010         /*
1011          * Oh no ! More data in buffer than we processed in current pdu.
1012          * Cheat. Move the data down and shrink the buffer.
1013          */
1014
1015         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1016                         current_pdu_len - prhdr->frag_len);
1017
1018         /* Remember to set the read offset back to zero. */
1019         prs_set_offset(current_pdu, 0);
1020
1021         /* Shrink the buffer. */
1022         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1023                 return NT_STATUS_BUFFER_TOO_SMALL;
1024         }
1025
1026         return NT_STATUS_OK;
1027 }
1028
1029 /****************************************************************************
1030  Send a request over the RPC transport and receive the first fragment of the reply.
1031 ****************************************************************************/
1032
1033 struct cli_api_pipe_state {
1034         struct event_context *ev;
1035         struct rpc_cli_transport *transport;
1036         uint8_t *rdata;
1037         uint32_t rdata_len;
1038 };
1039
1040 static void cli_api_pipe_trans_done(struct async_req *subreq);
1041 static void cli_api_pipe_write_done(struct async_req *subreq);
1042 static void cli_api_pipe_read_done(struct async_req *subreq);
1043
1044 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1045                                            struct event_context *ev,
1046                                            struct rpc_cli_transport *transport,
1047                                            uint8_t *data, size_t data_len,
1048                                            uint32_t max_rdata_len)
1049 {
1050         struct async_req *result, *subreq;
1051         struct cli_api_pipe_state *state;
1052         NTSTATUS status;
1053
1054         if (!async_req_setup(mem_ctx, &result, &state,
1055                              struct cli_api_pipe_state)) {
1056                 return NULL;
1057         }
1058         state->ev = ev;
1059         state->transport = transport;
1060
1061         if (max_rdata_len < RPC_HEADER_LEN) {
1062                 /*
1063                  * For an RPC reply we always need at least RPC_HEADER_LEN
1064                  * bytes. We check this here because we will receive
1065                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1066                  */
1067                 status = NT_STATUS_INVALID_PARAMETER;
1068                 goto post_status;
1069         }
1070
1071         if (transport->trans_send != NULL) {
1072                 subreq = transport->trans_send(state, ev, data, data_len,
1073                                                max_rdata_len, transport->priv);
1074                 if (subreq == NULL) {
1075                         status = NT_STATUS_NO_MEMORY;
1076                         goto post_status;
1077                 }
1078                 subreq->async.fn = cli_api_pipe_trans_done;
1079                 subreq->async.priv = result;
1080                 return result;
1081         }
1082
1083         /*
1084          * If the transport does not provide a "trans" routine, i.e. for
1085          * example the ncacn_ip_tcp transport, do the write/read step here.
1086          */
1087
1088         subreq = rpc_write_send(state, ev, transport, data, data_len);
1089         if (subreq == NULL) {
1090                 goto fail;
1091         }
1092         subreq->async.fn = cli_api_pipe_write_done;
1093         subreq->async.priv = result;
1094         return result;
1095
1096         status = NT_STATUS_INVALID_PARAMETER;
1097
1098  post_status:
1099         if (async_post_ntstatus(result, ev, status)) {
1100                 return result;
1101         }
1102  fail:
1103         TALLOC_FREE(result);
1104         return NULL;
1105 }
1106
1107 static void cli_api_pipe_trans_done(struct async_req *subreq)
1108 {
1109         struct async_req *req = talloc_get_type_abort(
1110                 subreq->async.priv, struct async_req);
1111         struct cli_api_pipe_state *state = talloc_get_type_abort(
1112                 req->private_data, struct cli_api_pipe_state);
1113         NTSTATUS status;
1114
1115         status = state->transport->trans_recv(subreq, state, &state->rdata,
1116                                               &state->rdata_len);
1117         TALLOC_FREE(subreq);
1118         if (!NT_STATUS_IS_OK(status)) {
1119                 async_req_nterror(req, status);
1120                 return;
1121         }
1122         async_req_done(req);
1123 }
1124
1125 static void cli_api_pipe_write_done(struct async_req *subreq)
1126 {
1127         struct async_req *req = talloc_get_type_abort(
1128                 subreq->async.priv, struct async_req);
1129         struct cli_api_pipe_state *state = talloc_get_type_abort(
1130                 req->private_data, struct cli_api_pipe_state);
1131         NTSTATUS status;
1132
1133         status = rpc_write_recv(subreq);
1134         TALLOC_FREE(subreq);
1135         if (!NT_STATUS_IS_OK(status)) {
1136                 async_req_nterror(req, status);
1137                 return;
1138         }
1139
1140         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1141         if (async_req_nomem(state->rdata, req)) {
1142                 return;
1143         }
1144
1145         /*
1146          * We don't need to use rpc_read_send here; the upper layer will cope
1147          * with a short read, and transport->trans_send could also return less
1148          * than max_rdata_len.
1149          */
1150         subreq = state->transport->read_send(state, state->ev, state->rdata,
1151                                              RPC_HEADER_LEN,
1152                                              state->transport->priv);
1153         if (async_req_nomem(subreq, req)) {
1154                 return;
1155         }
1156         subreq->async.fn = cli_api_pipe_read_done;
1157         subreq->async.priv = req;
1158 }
1159
1160 static void cli_api_pipe_read_done(struct async_req *subreq)
1161 {
1162         struct async_req *req = talloc_get_type_abort(
1163                 subreq->async.priv, struct async_req);
1164         struct cli_api_pipe_state *state = talloc_get_type_abort(
1165                 req->private_data, struct cli_api_pipe_state);
1166         NTSTATUS status;
1167         ssize_t received;
1168
1169         status = state->transport->read_recv(subreq, &received);
1170         TALLOC_FREE(subreq);
1171         if (!NT_STATUS_IS_OK(status)) {
1172                 async_req_nterror(req, status);
1173                 return;
1174         }
1175         state->rdata_len = received;
1176         async_req_done(req);
1177 }
1178
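/****************************************************************************
 Collect the result of cli_api_pipe_send: the first fragment of the reply
 (possibly shorter than max_rdata_len), talloc_move'd onto mem_ctx.
 ****************************************************************************/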
1179 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1180                                   uint8_t **prdata, uint32_t *prdata_len)
1181 {
1182         struct cli_api_pipe_state *state = talloc_get_type_abort(
1183                 req->private_data, struct cli_api_pipe_state);
1184         NTSTATUS status;
1185
1186         if (async_req_is_nterror(req, &status)) {
1187                 return status;
1188         }
1189
1190         *prdata = talloc_move(mem_ctx, &state->rdata);
1191         *prdata_len = state->rdata_len;
1192         return NT_STATUS_OK;
1193 }
1194
1195 /****************************************************************************
1196  Send data on an rpc pipe via trans. The prs_struct data must be the last
1197  pdu fragment of an NDR data stream.
1198
1199  Receive response data from an rpc pipe, which may be large...
1200
1201  Read the first fragment: unfortunately have to use SMBtrans for the first
1202  bit, then SMBreadX for subsequent bits.
1203
1204  If first fragment received also wasn't the last fragment, continue
1205  getting fragments until we _do_ receive the last fragment.
1206
1207  Request/Response PDU's look like the following...
1208
1209  |<------------------PDU len----------------------------------------------->|
1210  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1211
1212  +------------+-----------------+-------------+---------------+-------------+
1213  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1214  +------------+-----------------+-------------+---------------+-------------+
1215
1216  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1217  signing & sealing being negotiated.
1218
1219  ****************************************************************************/
1220
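/*
 * For reference, once sign/seal is in effect the NDR data carried by one
 * fragment works out as (see cli_pipe_verify_ntlmssp/schannel above):
 *
 *   data_len = frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN
 *              - RPC_HDR_AUTH_LEN - auth_len;
 *
 * with any padding recorded in the auth header stripped off afterwards.
 */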
1221 struct rpc_api_pipe_state {
1222         struct event_context *ev;
1223         struct rpc_pipe_client *cli;
1224         uint8_t expected_pkt_type;
1225
1226         prs_struct incoming_frag;
1227         struct rpc_hdr_info rhdr;
1228
1229         prs_struct incoming_pdu;        /* Incoming reply */
1230         uint32_t incoming_pdu_offset;
1231 };
1232
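/*
 * prs_structs do not release their buffers via talloc, so free them
 * explicitly when the request state is torn down.
 */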
1233 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1234 {
1235         prs_mem_free(&state->incoming_frag);
1236         prs_mem_free(&state->incoming_pdu);
1237         return 0;
1238 }
1239
1240 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1241 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1242
1243 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1244                                            struct event_context *ev,
1245                                            struct rpc_pipe_client *cli,
1246                                            prs_struct *data, /* Outgoing PDU */
1247                                            uint8_t expected_pkt_type)
1248 {
1249         struct async_req *result, *subreq;
1250         struct rpc_api_pipe_state *state;
1251         uint16_t max_recv_frag;
1252         NTSTATUS status;
1253
1254         if (!async_req_setup(mem_ctx, &result, &state,
1255                              struct rpc_api_pipe_state)) {
1256                 return NULL;
1257         }
1258         state->ev = ev;
1259         state->cli = cli;
1260         state->expected_pkt_type = expected_pkt_type;
1261         state->incoming_pdu_offset = 0;
1262
1263         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1264
1265         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1266         /* Make incoming_pdu dynamic with no memory. */
1267         prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1268
1269         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1270
1271         /*
1272          * Ensure we're not sending too much.
1273          */
1274         if (prs_offset(data) > cli->max_xmit_frag) {
1275                 status = NT_STATUS_INVALID_PARAMETER;
1276                 goto post_status;
1277         }
1278
1279         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1280
1281         max_recv_frag = cli->max_recv_frag;
1282
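        /*
         * In developer builds, deliberately advertise a small receive
         * fragment size so replies come back as many small fragments and
         * the reassembly code above gets exercised.
         */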
1283 #ifdef DEVELOPER
1284         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1285 #endif
1286
1287         subreq = cli_api_pipe_send(state, ev, cli->transport,
1288                                    (uint8_t *)prs_data_p(data),
1289                                    prs_offset(data), max_recv_frag);
1290         if (subreq == NULL) {
1291                 status = NT_STATUS_NO_MEMORY;
1292                 goto post_status;
1293         }
1294         subreq->async.fn = rpc_api_pipe_trans_done;
1295         subreq->async.priv = result;
1296         return result;
1297
1298  post_status:
1299         if (async_post_ntstatus(result, ev, status)) {
1300                 return result;
1301         }
1302         TALLOC_FREE(result);
1303         return NULL;
1304 }
1305
1306 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1307 {
1308         struct async_req *req = talloc_get_type_abort(
1309                 subreq->async.priv, struct async_req);
1310         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1311                 req->private_data, struct rpc_api_pipe_state);
1312         NTSTATUS status;
1313         uint8_t *rdata = NULL;
1314         uint32_t rdata_len = 0;
1315         char *rdata_copy;
1316
1317         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1318         TALLOC_FREE(subreq);
1319         if (!NT_STATUS_IS_OK(status)) {
1320                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1321                 async_req_nterror(req, status);
1322                 return;
1323         }
1324
1325         if (rdata == NULL) {
1326                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1327                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1328                 async_req_done(req);
1329                 return;
1330         }
1331
1332         /*
1333          * Give the memory received from cli_trans as dynamic to the current
1334          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1335          * :-(
1336          */
1337         rdata_copy = (char *)memdup(rdata, rdata_len);
1338         TALLOC_FREE(rdata);
1339         if (async_req_nomem(rdata_copy, req)) {
1340                 return;
1341         }
1342         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1343
1344         /* Ensure we have enough data for a pdu. */
1345         subreq = get_complete_frag_send(state, state->ev, state->cli,
1346                                         &state->rhdr, &state->incoming_frag);
1347         if (async_req_nomem(subreq, req)) {
1348                 return;
1349         }
1350         subreq->async.fn = rpc_api_pipe_got_pdu;
1351         subreq->async.priv = req;
1352 }
1353
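     /*******************************************************************
      Called once a complete fragment has been received: validate it,
      append its data portion to incoming_pdu and either finish (last
      fragment) or pull in the next fragment.
      ********************************************************************/
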
1354 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1355 {
1356         struct async_req *req = talloc_get_type_abort(
1357                 subreq->async.priv, struct async_req);
1358         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1359                 req->private_data, struct rpc_api_pipe_state);
1360         NTSTATUS status;
1361         char *rdata = NULL;
1362         uint32_t rdata_len = 0;
1363
1364         status = get_complete_frag_recv(subreq);
1365         TALLOC_FREE(subreq);
1366         if (!NT_STATUS_IS_OK(status)) {
1367                 DEBUG(5, ("get_complete_frag failed: %s\n",
1368                           nt_errstr(status)));
1369                 async_req_nterror(req, status);
1370                 return;
1371         }
1372
1373         status = cli_pipe_validate_current_pdu(
1374                 state->cli, &state->rhdr, &state->incoming_frag,
1375                 state->expected_pkt_type, &rdata, &rdata_len,
1376                 &state->incoming_pdu);
1377
1378         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1379                   (unsigned)prs_data_size(&state->incoming_frag),
1380                   (unsigned)state->incoming_pdu_offset,
1381                   nt_errstr(status)));
1382
1383         if (!NT_STATUS_IS_OK(status)) {
1384                 async_req_nterror(req, status);
1385                 return;
1386         }
1387
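             /*
              * The first byte of the DCE/RPC data representation (drep)
              * field is 0x10 for little-endian and 0x00 for big-endian
              * integer data.
              */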
1388         if ((state->rhdr.flags & RPC_FLG_FIRST)
1389             && (state->rhdr.pack_type[0] == 0)) {
1390                 /*
1391                  * Set the data type correctly for big-endian data on the
1392                  * first packet.
1393                  */
1394                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1395                           "big-endian.\n",
1396                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1397                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1398         }
1399         /*
1400          * Check endianness on subsequent packets.
1401          */
1402         if (state->incoming_frag.bigendian_data
1403             != state->incoming_pdu.bigendian_data) {
1404                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1405                          "%s\n",
1406                          state->incoming_pdu.bigendian_data?"big":"little",
1407                          state->incoming_frag.bigendian_data?"big":"little"));
1408                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1409                 return;
1410         }
1411
1412         /* Now copy the data portion out of the pdu into rbuf. */
1413         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1414                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1415                 return;
1416         }
1417
1418         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1419                rdata, (size_t)rdata_len);
1420         state->incoming_pdu_offset += rdata_len;
1421
1422         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1423                                             &state->incoming_frag);
1424         if (!NT_STATUS_IS_OK(status)) {
1425                 async_req_nterror(req, status);
1426                 return;
1427         }
1428
1429         if (state->rhdr.flags & RPC_FLG_LAST) {
1430                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1431                           rpccli_pipe_txt(debug_ctx(), state->cli),
1432                           (unsigned)prs_data_size(&state->incoming_pdu)));
1433                 async_req_done(req);
1434                 return;
1435         }
1436
1437         subreq = get_complete_frag_send(state, state->ev, state->cli,
1438                                         &state->rhdr, &state->incoming_frag);
1439         if (async_req_nomem(subreq, req)) {
1440                 return;
1441         }
1442         subreq->async.fn = rpc_api_pipe_got_pdu;
1443         subreq->async.priv = req;
1444 }
1445
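     /*******************************************************************
      Hand the assembled reply PDU over to the caller, moving it to the
      caller's memory context.
      ********************************************************************/
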
1446 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1447                                   prs_struct *reply_pdu)
1448 {
1449         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1450                 req->private_data, struct rpc_api_pipe_state);
1451         NTSTATUS status;
1452
1453         if (async_req_is_nterror(req, &status)) {
1454                 return status;
1455         }
1456
1457         *reply_pdu = state->incoming_pdu;
1458         reply_pdu->mem_ctx = mem_ctx;
1459
1460         /*
1461          * Prevent state->incoming_pdu from being freed in
1462          * rpc_api_pipe_state_destructor()
1463          */
1464         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1465
1466         return NT_STATUS_OK;
1467 }
1468
1469 /*******************************************************************
1470  Creates krb5 auth bind.
1471  ********************************************************************/
1472
1473 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1474                                                 enum pipe_auth_level auth_level,
1475                                                 RPC_HDR_AUTH *pauth_out,
1476                                                 prs_struct *auth_data)
1477 {
1478 #ifdef HAVE_KRB5
1479         int ret;
1480         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1481         DATA_BLOB tkt = data_blob_null;
1482         DATA_BLOB tkt_wrapped = data_blob_null;
1483
1484         /* We may change the pad length before marshalling. */
1485         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1486
1487         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1488                 a->service_principal ));
1489
1490         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1491
1492         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1493                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1494
1495         if (ret) {
1496                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1497                         "failed with %s\n",
1498                         a->service_principal,
1499                         error_message(ret) ));
1500
1501                 data_blob_free(&tkt);
1502                 prs_mem_free(auth_data);
1503                 return NT_STATUS_INVALID_PARAMETER;
1504         }
1505
1506         /* wrap that up in a nice GSS-API wrapping */
1507         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1508
1509         data_blob_free(&tkt);
1510
1511         /* Auth len in the rpc header doesn't include auth_header. */
1512         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1513                 data_blob_free(&tkt_wrapped);
1514                 prs_mem_free(auth_data);
1515                 return NT_STATUS_NO_MEMORY;
1516         }
1517
1518         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1519         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1520
1521         data_blob_free(&tkt_wrapped);
1522         return NT_STATUS_OK;
1523 #else
1524         return NT_STATUS_INVALID_PARAMETER;
1525 #endif
1526 }
1527
1528 /*******************************************************************
1529  Creates SPNEGO NTLMSSP auth bind.
1530  ********************************************************************/
1531
1532 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1533                                                 enum pipe_auth_level auth_level,
1534                                                 RPC_HDR_AUTH *pauth_out,
1535                                                 prs_struct *auth_data)
1536 {
1537         NTSTATUS nt_status;
1538         DATA_BLOB null_blob = data_blob_null;
1539         DATA_BLOB request = data_blob_null;
1540         DATA_BLOB spnego_msg = data_blob_null;
1541
1542         /* We may change the pad length before marshalling. */
1543         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1544
1545         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1546         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1547                                         null_blob,
1548                                         &request);
1549
1550         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1551                 data_blob_free(&request);
1552                 prs_mem_free(auth_data);
1553                 return nt_status;
1554         }
1555
1556         /* Wrap this in SPNEGO. */
1557         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1558
1559         data_blob_free(&request);
1560
1561         /* Auth len in the rpc header doesn't include auth_header. */
1562         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1563                 data_blob_free(&spnego_msg);
1564                 prs_mem_free(auth_data);
1565                 return NT_STATUS_NO_MEMORY;
1566         }
1567
1568         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1569         dump_data(5, spnego_msg.data, spnego_msg.length);
1570
1571         data_blob_free(&spnego_msg);
1572         return NT_STATUS_OK;
1573 }
1574
1575 /*******************************************************************
1576  Creates NTLMSSP auth bind.
1577  ********************************************************************/
1578
1579 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1580                                                 enum pipe_auth_level auth_level,
1581                                                 RPC_HDR_AUTH *pauth_out,
1582                                                 prs_struct *auth_data)
1583 {
1584         NTSTATUS nt_status;
1585         DATA_BLOB null_blob = data_blob_null;
1586         DATA_BLOB request = data_blob_null;
1587
1588         /* We may change the pad length before marshalling. */
1589         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1590
1591         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1592         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1593                                         null_blob,
1594                                         &request);
1595
1596         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1597                 data_blob_free(&request);
1598                 prs_mem_free(auth_data);
1599                 return nt_status;
1600         }
1601
1602         /* Auth len in the rpc header doesn't include auth_header. */
1603         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1604                 data_blob_free(&request);
1605                 prs_mem_free(auth_data);
1606                 return NT_STATUS_NO_MEMORY;
1607         }
1608
1609         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1610         dump_data(5, request.data, request.length);
1611
1612         data_blob_free(&request);
1613         return NT_STATUS_OK;
1614 }
1615
1616 /*******************************************************************
1617  Creates schannel auth bind.
1618  ********************************************************************/
1619
1620 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1621                                                 enum pipe_auth_level auth_level,
1622                                                 RPC_HDR_AUTH *pauth_out,
1623                                                 prs_struct *auth_data)
1624 {
1625         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1626
1627         /* We may change the pad length before marshalling. */
1628         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1629
1630         /* Use lp_workgroup() if domain not specified */
1631
1632         if (!cli->auth->domain || !cli->auth->domain[0]) {
1633                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1634                 if (cli->auth->domain == NULL) {
1635                         return NT_STATUS_NO_MEMORY;
1636                 }
1637         }
1638
1639         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1640                                    global_myname());
1641
1642         /*
1643          * Now marshall the data into the auth parse_struct.
1644          */
1645
1646         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1647                                        &schannel_neg, auth_data, 0)) {
1648                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1649                 prs_mem_free(auth_data);
1650                 return NT_STATUS_NO_MEMORY;
1651         }
1652
1653         return NT_STATUS_OK;
1654 }
1655
1656 /*******************************************************************
1657  Creates the internals of a DCE/RPC bind request or alter context PDU.
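      The marshalled PDU is laid out (as the code below emits it) as:
        RPC_HDR | RPC_HDR_RB | [pad to 8-byte boundary] | RPC_HDR_AUTH | auth data
      where the padding and auth pieces are only present when auth_len is
      non-zero.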
1658  ********************************************************************/
1659
1660 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1661                                                 prs_struct *rpc_out, 
1662                                                 uint32 rpc_call_id,
1663                                                 const RPC_IFACE *abstract,
1664                                                 const RPC_IFACE *transfer,
1665                                                 RPC_HDR_AUTH *phdr_auth,
1666                                                 prs_struct *pauth_info)
1667 {
1668         RPC_HDR hdr;
1669         RPC_HDR_RB hdr_rb;
1670         RPC_CONTEXT rpc_ctx;
1671         uint16 auth_len = prs_offset(pauth_info);
1672         uint8 ss_padding_len = 0;
1673         uint16 frag_len = 0;
1674
1675         /* create the RPC context. */
1676         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1677
1678         /* create the bind request RPC_HDR_RB */
1679         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1680
1681         /* Start building the frag length. */
1682         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1683
1684         /* Do we need to pad ? */
1685         if (auth_len) {
1686                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1687                 if (data_len % 8) {
1688                         ss_padding_len = 8 - (data_len % 8);
1689                         phdr_auth->auth_pad_len = ss_padding_len;
1690                 }
1691                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1692         }
1693
1694         /* Create the request RPC_HDR */
1695         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1696
1697         /* Marshall the RPC header */
1698         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1699                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1700                 return NT_STATUS_NO_MEMORY;
1701         }
1702
1703         /* Marshall the bind request data */
1704         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1705                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1706                 return NT_STATUS_NO_MEMORY;
1707         }
1708
1709         /*
1710          * Grow the outgoing buffer to store any auth info.
1711          */
1712
1713         if(auth_len != 0) {
1714                 if (ss_padding_len) {
1715                         char pad[8];
1716                         memset(pad, '\0', 8);
1717                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1718                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1719                                 return NT_STATUS_NO_MEMORY;
1720                         }
1721                 }
1722
1723                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1724                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1725                         return NT_STATUS_NO_MEMORY;
1726                 }
1727
1728
1729                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1730                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1731                         return NT_STATUS_NO_MEMORY;
1732                 }
1733         }
1734
1735         return NT_STATUS_OK;
1736 }
1737
1738 /*******************************************************************
1739  Creates a DCE/RPC bind request.
1740  ********************************************************************/
1741
1742 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1743                                 prs_struct *rpc_out, 
1744                                 uint32 rpc_call_id,
1745                                 const RPC_IFACE *abstract,
1746                                 const RPC_IFACE *transfer,
1747                                 enum pipe_auth_type auth_type,
1748                                 enum pipe_auth_level auth_level)
1749 {
1750         RPC_HDR_AUTH hdr_auth;
1751         prs_struct auth_info;
1752         NTSTATUS ret = NT_STATUS_OK;
1753
1754         ZERO_STRUCT(hdr_auth);
1755         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1756                 return NT_STATUS_NO_MEMORY;
1757
1758         switch (auth_type) {
1759                 case PIPE_AUTH_TYPE_SCHANNEL:
1760                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1761                         if (!NT_STATUS_IS_OK(ret)) {
1762                                 prs_mem_free(&auth_info);
1763                                 return ret;
1764                         }
1765                         break;
1766
1767                 case PIPE_AUTH_TYPE_NTLMSSP:
1768                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1769                         if (!NT_STATUS_IS_OK(ret)) {
1770                                 prs_mem_free(&auth_info);
1771                                 return ret;
1772                         }
1773                         break;
1774
1775                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1776                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1777                         if (!NT_STATUS_IS_OK(ret)) {
1778                                 prs_mem_free(&auth_info);
1779                                 return ret;
1780                         }
1781                         break;
1782
1783                 case PIPE_AUTH_TYPE_KRB5:
1784                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1785                         if (!NT_STATUS_IS_OK(ret)) {
1786                                 prs_mem_free(&auth_info);
1787                                 return ret;
1788                         }
1789                         break;
1790
1791                 case PIPE_AUTH_TYPE_NONE:
1792                         break;
1793
1794                 default:
1795                         /* "Can't" happen. */
1796                         return NT_STATUS_INVALID_INFO_CLASS;
1797         }
1798
1799         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1800                                                 rpc_out, 
1801                                                 rpc_call_id,
1802                                                 abstract,
1803                                                 transfer,
1804                                                 &hdr_auth,
1805                                                 &auth_info);
1806
1807         prs_mem_free(&auth_info);
1808         return ret;
1809 }
1810
1811 /*******************************************************************
1812  Create and add the NTLMSSP sign/seal auth header and data.
1813  ********************************************************************/
1814
1815 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1816                                         RPC_HDR *phdr,
1817                                         uint32 ss_padding_len,
1818                                         prs_struct *outgoing_pdu)
1819 {
1820         RPC_HDR_AUTH auth_info;
1821         NTSTATUS status;
1822         DATA_BLOB auth_blob = data_blob_null;
1823         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1824
1825         if (!cli->auth->a_u.ntlmssp_state) {
1826                 return NT_STATUS_INVALID_PARAMETER;
1827         }
1828
1829         /* Init and marshall the auth header. */
1830         init_rpc_hdr_auth(&auth_info,
1831                         map_pipe_auth_type_to_rpc_auth_type(
1832                                 cli->auth->auth_type),
1833                         cli->auth->auth_level,
1834                         ss_padding_len,
1835                         1 /* context id. */);
1836
1837         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1838                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1839                 data_blob_free(&auth_blob);
1840                 return NT_STATUS_NO_MEMORY;
1841         }
1842
1843         switch (cli->auth->auth_level) {
1844                 case PIPE_AUTH_LEVEL_PRIVACY:
1845                         /* Data portion is encrypted. */
1846                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1847                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1848                                         data_and_pad_len,
1849                                         (unsigned char *)prs_data_p(outgoing_pdu),
1850                                         (size_t)prs_offset(outgoing_pdu),
1851                                         &auth_blob);
1852                         if (!NT_STATUS_IS_OK(status)) {
1853                                 data_blob_free(&auth_blob);
1854                                 return status;
1855                         }
1856                         break;
1857
1858                 case PIPE_AUTH_LEVEL_INTEGRITY:
1859                         /* Data is signed. */
1860                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1861                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1862                                         data_and_pad_len,
1863                                         (unsigned char *)prs_data_p(outgoing_pdu),
1864                                         (size_t)prs_offset(outgoing_pdu),
1865                                         &auth_blob);
1866                         if (!NT_STATUS_IS_OK(status)) {
1867                                 data_blob_free(&auth_blob);
1868                                 return status;
1869                         }
1870                         break;
1871
1872                 default:
1873                         /* Can't happen. */
1874                         smb_panic("bad auth level");
1875                         /* Notreached. */
1876                         return NT_STATUS_INVALID_PARAMETER;
1877         }
1878
1879         /* Finally marshall the blob. */
1880
1881         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1882                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1883                         (unsigned int)NTLMSSP_SIG_SIZE));
1884                 data_blob_free(&auth_blob);
1885                 return NT_STATUS_NO_MEMORY;
1886         }
1887
1888         data_blob_free(&auth_blob);
1889         return NT_STATUS_OK;
1890 }
1891
1892 /*******************************************************************
1893  Create and add the schannel sign/seal auth header and data.
1894  ********************************************************************/
1895
1896 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1897                                         RPC_HDR *phdr,
1898                                         uint32 ss_padding_len,
1899                                         prs_struct *outgoing_pdu)
1900 {
1901         RPC_HDR_AUTH auth_info;
1902         RPC_AUTH_SCHANNEL_CHK verf;
1903         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1904         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1905         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1906
1907         if (!sas) {
1908                 return NT_STATUS_INVALID_PARAMETER;
1909         }
1910
1911         /* Init and marshall the auth header. */
1912         init_rpc_hdr_auth(&auth_info,
1913                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1914                         cli->auth->auth_level,
1915                         ss_padding_len,
1916                         1 /* context id. */);
1917
1918         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1919                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1920                 return NT_STATUS_NO_MEMORY;
1921         }
1922
1923         switch (cli->auth->auth_level) {
1924                 case PIPE_AUTH_LEVEL_PRIVACY:
1925                 case PIPE_AUTH_LEVEL_INTEGRITY:
1926                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1927                                 sas->seq_num));
1928
1929                         schannel_encode(sas,
1930                                         cli->auth->auth_level,
1931                                         SENDER_IS_INITIATOR,
1932                                         &verf,
1933                                         data_p,
1934                                         data_and_pad_len);
1935
1936                         sas->seq_num++;
1937                         break;
1938
1939                 default:
1940                         /* Can't happen. */
1941                         smb_panic("bad auth level");
1942                         /* Notreached. */
1943                         return NT_STATUS_INVALID_PARAMETER;
1944         }
1945
1946         /* Finally marshall the blob. */
1947         smb_io_rpc_auth_schannel_chk("",
1948                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1949                         &verf,
1950                         outgoing_pdu,
1951                         0);
1952
1953         return NT_STATUS_OK;
1954 }
1955
1956 /*******************************************************************
1957  Calculate how much data we're going to send in this packet, also
1958  work out any sign/seal padding length.
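      For an authenticated request each fragment is laid out as
        RPC_HDR | RPC_HDR_REQ | data | sign/seal pad (to 8) | RPC_HDR_AUTH | auth blob
      and *p_frag_len is the sum of those pieces.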
1959  ********************************************************************/
1960
1961 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1962                                         uint32 data_left,
1963                                         uint16 *p_frag_len,
1964                                         uint16 *p_auth_len,
1965                                         uint32 *p_ss_padding)
1966 {
1967         uint32 data_space, data_len;
1968
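             /*
              * In DEVELOPER builds, randomly halve the remaining data so
              * that requests are sometimes split into several fragments.
              */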
1969 #ifdef DEVELOPER
1970         if ((data_left > 0) && (sys_random() % 2)) {
1971                 data_left = MAX(data_left/2, 1);
1972         }
1973 #endif
1974
1975         switch (cli->auth->auth_level) {
1976                 case PIPE_AUTH_LEVEL_NONE:
1977                 case PIPE_AUTH_LEVEL_CONNECT:
1978                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1979                         data_len = MIN(data_space, data_left);
1980                         *p_ss_padding = 0;
1981                         *p_auth_len = 0;
1982                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1983                         return data_len;
1984
1985                 case PIPE_AUTH_LEVEL_INTEGRITY:
1986                 case PIPE_AUTH_LEVEL_PRIVACY:
1987                         /* Treat the same for all authenticated rpc requests. */
1988                         switch(cli->auth->auth_type) {
1989                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1990                                 case PIPE_AUTH_TYPE_NTLMSSP:
1991                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1992                                         break;
1993                                 case PIPE_AUTH_TYPE_SCHANNEL:
1994                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1995                                         break;
1996                                 default:
1997                                         smb_panic("bad auth type");
1998                                         break;
1999                         }
2000
2001                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2002                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2003
2004                         data_len = MIN(data_space, data_left);
2005                         *p_ss_padding = 0;
2006                         if (data_len % 8) {
2007                                 *p_ss_padding = 8 - (data_len % 8);
2008                         }
2009                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2010                                         data_len + *p_ss_padding +              /* data plus padding. */
2011                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2012                         return data_len;
2013
2014                 default:
2015                         smb_panic("bad auth level");
2016                         /* Notreached. */
2017                         return 0;
2018         }
2019 }
2020
2021 /*******************************************************************
2022  External interface.
2023  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2024  Reply is NDR encoded in out_data. Splits the data stream into RPC PDUs
2025  and deals with signing/sealing details.
2026  ********************************************************************/
2027
2028 struct rpc_api_pipe_req_state {
2029         struct event_context *ev;
2030         struct rpc_pipe_client *cli;
2031         uint8_t op_num;
2032         uint32_t call_id;
2033         prs_struct *req_data;
2034         uint32_t req_data_sent;
2035         prs_struct outgoing_frag;
2036         prs_struct reply_pdu;
2037 };
2038
2039 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2040 {
2041         prs_mem_free(&s->outgoing_frag);
2042         prs_mem_free(&s->reply_pdu);
2043         return 0;
2044 }
2045
2046 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2047 static void rpc_api_pipe_req_done(struct async_req *subreq);
2048 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2049                                   bool *is_last_frag);
2050
2051 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2052                                         struct event_context *ev,
2053                                         struct rpc_pipe_client *cli,
2054                                         uint8_t op_num,
2055                                         prs_struct *req_data)
2056 {
2057         struct async_req *result, *subreq;
2058         struct rpc_api_pipe_req_state *state;
2059         NTSTATUS status;
2060         bool is_last_frag;
2061
2062         if (!async_req_setup(mem_ctx, &result, &state,
2063                              struct rpc_api_pipe_req_state)) {
2064                 return NULL;
2065         }
2066         state->ev = ev;
2067         state->cli = cli;
2068         state->op_num = op_num;
2069         state->req_data = req_data;
2070         state->req_data_sent = 0;
2071         state->call_id = get_rpc_call_id();
2072
2073         if (cli->max_xmit_frag
2074             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2075                 /* Server is screwed up ! */
2076                 status = NT_STATUS_INVALID_PARAMETER;
2077                 goto post_status;
2078         }
2079
2080         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2081
2082         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2083                       state, MARSHALL)) {
2084                 status = NT_STATUS_NO_MEMORY;
2085                 goto post_status;
2086         }
2087
2088         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2089
2090         status = prepare_next_frag(state, &is_last_frag);
2091         if (!NT_STATUS_IS_OK(status)) {
2092                 goto post_status;
2093         }
2094
2095         if (is_last_frag) {
2096                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2097                                            &state->outgoing_frag,
2098                                            RPC_RESPONSE);
2099                 if (subreq == NULL) {
2100                         status = NT_STATUS_NO_MEMORY;
2101                         goto post_status;
2102                 }
2103                 subreq->async.fn = rpc_api_pipe_req_done;
2104                 subreq->async.priv = result;
2105         } else {
2106                 subreq = rpc_write_send(
2107                         state, ev, cli->transport,
2108                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2109                         prs_offset(&state->outgoing_frag));
2110                 if (subreq == NULL) {
2111                         status = NT_STATUS_NO_MEMORY;
2112                         goto post_status;
2113                 }
2114                 subreq->async.fn = rpc_api_pipe_req_write_done;
2115                 subreq->async.priv = result;
2116         }
2117         return result;
2118
2119  post_status:
2120         if (async_post_ntstatus(result, ev, status)) {
2121                 return result;
2122         }
2123         TALLOC_FREE(result);
2124         return NULL;
2125 }
2126
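     /*******************************************************************
      Marshall the next request fragment into state->outgoing_frag: RPC
      header, request header, the next chunk of request data, any
      sign/seal padding and the auth footer. Sets RPC_FLG_FIRST/LAST as
      appropriate and reports via *is_last_frag whether this was the
      final fragment.
      ********************************************************************/
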
2127 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2128                                   bool *is_last_frag)
2129 {
2130         RPC_HDR hdr;
2131         RPC_HDR_REQ hdr_req;
2132         uint32_t data_sent_thistime;
2133         uint16_t auth_len;
2134         uint16_t frag_len;
2135         uint8_t flags = 0;
2136         uint32_t ss_padding;
2137         uint32_t data_left;
2138         char pad[8] = { 0, };
2139         NTSTATUS status;
2140
2141         data_left = prs_offset(state->req_data) - state->req_data_sent;
2142
2143         data_sent_thistime = calculate_data_len_tosend(
2144                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2145
2146         if (state->req_data_sent == 0) {
2147                 flags = RPC_FLG_FIRST;
2148         }
2149
2150         if (data_sent_thistime == data_left) {
2151                 flags |= RPC_FLG_LAST;
2152         }
2153
2154         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2155                 return NT_STATUS_NO_MEMORY;
2156         }
2157
2158         /* Create and marshall the header and request header. */
2159         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2160                      auth_len);
2161
2162         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2163                 return NT_STATUS_NO_MEMORY;
2164         }
2165
2166         /* Create the rpc request RPC_HDR_REQ */
2167         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2168                          state->op_num);
2169
2170         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2171                                 &state->outgoing_frag, 0)) {
2172                 return NT_STATUS_NO_MEMORY;
2173         }
2174
2175         /* Copy in the data, plus any ss padding. */
2176         if (!prs_append_some_prs_data(&state->outgoing_frag,
2177                                       state->req_data, state->req_data_sent,
2178                                       data_sent_thistime)) {
2179                 return NT_STATUS_NO_MEMORY;
2180         }
2181
2182         /* Copy the sign/seal padding data. */
2183         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2184                 return NT_STATUS_NO_MEMORY;
2185         }
2186
2187         /* Generate any auth sign/seal and add the auth footer. */
2188         switch (state->cli->auth->auth_type) {
2189         case PIPE_AUTH_TYPE_NONE:
2190                 status = NT_STATUS_OK;
2191                 break;
2192         case PIPE_AUTH_TYPE_NTLMSSP:
2193         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2194                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2195                                                  &state->outgoing_frag);
2196                 break;
2197         case PIPE_AUTH_TYPE_SCHANNEL:
2198                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2199                                                   &state->outgoing_frag);
2200                 break;
2201         default:
2202                 status = NT_STATUS_INVALID_PARAMETER;
2203                 break;
2204         }
2205
2206         state->req_data_sent += data_sent_thistime;
2207         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2208
2209         return status;
2210 }
2211
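     /*******************************************************************
      A non-final request fragment has been written. Prepare the next
      fragment and either write it as well or, for the last one, do the
      full request/response exchange.
      ********************************************************************/
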
2212 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2213 {
2214         struct async_req *req = talloc_get_type_abort(
2215                 subreq->async.priv, struct async_req);
2216         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2217                 req->private_data, struct rpc_api_pipe_req_state);
2218         NTSTATUS status;
2219         bool is_last_frag;
2220
2221         status = rpc_write_recv(subreq);
2222         TALLOC_FREE(subreq);
2223         if (!NT_STATUS_IS_OK(status)) {
2224                 async_req_nterror(req, status);
2225                 return;
2226         }
2227
2228         status = prepare_next_frag(state, &is_last_frag);
2229         if (!NT_STATUS_IS_OK(status)) {
2230                 async_req_nterror(req, status);
2231                 return;
2232         }
2233
2234         if (is_last_frag) {
2235                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2236                                            &state->outgoing_frag,
2237                                            RPC_RESPONSE);
2238                 if (async_req_nomem(subreq, req)) {
2239                         return;
2240                 }
2241                 subreq->async.fn = rpc_api_pipe_req_done;
2242                 subreq->async.priv = req;
2243         } else {
2244                 subreq = rpc_write_send(
2245                         state, state->ev,
2246                         state->cli->transport,
2247                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2248                         prs_offset(&state->outgoing_frag));
2249                 if (async_req_nomem(subreq, req)) {
2250                         return;
2251                 }
2252                 subreq->async.fn = rpc_api_pipe_req_write_done;
2253                 subreq->async.priv = req;
2254         }
2255 }
2256
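     /*******************************************************************
      The last fragment has been sent and the reply PDU received; store
      it in the request state for rpc_api_pipe_req_recv().
      ********************************************************************/
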
2257 static void rpc_api_pipe_req_done(struct async_req *subreq)
2258 {
2259         struct async_req *req = talloc_get_type_abort(
2260                 subreq->async.priv, struct async_req);
2261         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2262                 req->private_data, struct rpc_api_pipe_req_state);
2263         NTSTATUS status;
2264
2265         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2266         TALLOC_FREE(subreq);
2267         if (!NT_STATUS_IS_OK(status)) {
2268                 async_req_nterror(req, status);
2269                 return;
2270         }
2271         async_req_done(req);
2272 }
2273
2274 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2275                                prs_struct *reply_pdu)
2276 {
2277         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2278                 req->private_data, struct rpc_api_pipe_req_state);
2279         NTSTATUS status;
2280
2281         if (async_req_is_nterror(req, &status)) {
2282                 /*
2283                  * We always have to initialize the reply pdu, even if there is
2284                  * none. The rpccli_* caller routines expect this.
2285                  */
2286                 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2287                 return status;
2288         }
2289
2290         *reply_pdu = state->reply_pdu;
2291         reply_pdu->mem_ctx = mem_ctx;
2292
2293         /*
2294          * Prevent state->reply_pdu from being freed in
2295          * rpc_api_pipe_req_state_destructor()
2296          */
2297         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2298
2299         return NT_STATUS_OK;
2300 }
2301
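     /*******************************************************************
      Synchronous wrapper: drive a private event context until the async
      rpc_api_pipe_req_send/recv pair has completed.
      ********************************************************************/
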
2302 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2303                         uint8 op_num,
2304                         prs_struct *in_data,
2305                         prs_struct *out_data)
2306 {
2307         TALLOC_CTX *frame = talloc_stackframe();
2308         struct event_context *ev;
2309         struct async_req *req;
2310         NTSTATUS status = NT_STATUS_NO_MEMORY;
2311
2312         ev = event_context_init(frame);
2313         if (ev == NULL) {
2314                 goto fail;
2315         }
2316
2317         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2318         if (req == NULL) {
2319                 goto fail;
2320         }
2321
2322         while (req->state < ASYNC_REQ_DONE) {
2323                 event_loop_once(ev);
2324         }
2325
2326         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2327  fail:
2328         TALLOC_FREE(frame);
2329         return status;
2330 }
2331
2332 #if 0
2333 /****************************************************************************
2334  Set the handle state.
2335 ****************************************************************************/
2336
2337 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2338                                    const char *pipe_name, uint16 device_state)
2339 {
2340         bool state_set = False;
2341         char param[2];
2342         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2343         char *rparam = NULL;
2344         char *rdata = NULL;
2345         uint32 rparam_len, rdata_len;
2346
2347         if (pipe_name == NULL)
2348                 return False;
2349
2350         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2351                  cli->fnum, pipe_name, device_state));
2352
2353         /* create parameters: device state */
2354         SSVAL(param, 0, device_state);
2355
2356         /* create setup parameters. */
2357         setup[0] = 0x0001; 
2358         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2359
2360         /* send the data on \PIPE\ */
2361         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2362                     setup, 2, 0,                /* setup, length, max */
2363                     param, 2, 0,                /* param, length, max */
2364                     NULL, 0, 1024,              /* data, length, max */
2365                     &rparam, &rparam_len,        /* return param, length */
2366                     &rdata, &rdata_len))         /* return data, length */
2367         {
2368                 DEBUG(5, ("Set Handle state: return OK\n"));
2369                 state_set = True;
2370         }
2371
2372         SAFE_FREE(rparam);
2373         SAFE_FREE(rdata);
2374
2375         return state_set;
2376 }
2377 #endif
2378
2379 /****************************************************************************
2380  Check the rpc bind acknowledge response.
2381 ****************************************************************************/
2382
2383 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2384 {
2385         if (hdr_ba->addr.len == 0) {
2386                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2387         }
2388
2389         /* check the transfer syntax */
2390         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2391              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2392                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2393                 return False;
2394         }
2395
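             /*
              * Note: a denied result is only logged here; the response is
              * still treated as accepted below.
              */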
2396         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2397                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2398                           hdr_ba->res.num_results, hdr_ba->res.reason));
2399         }
2400
2401         DEBUG(5,("check_bind_response: accepted!\n"));
2402         return True;
2403 }
2404
2405 /*******************************************************************
2406  Creates a DCE/RPC bind authentication response.
2407  This is the packet that is sent back to the server once we
2408  have received a BIND-ACK, to finish the third leg of
2409  the authentication handshake.
2410  ********************************************************************/
2411
2412 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2413                                 uint32 rpc_call_id,
2414                                 enum pipe_auth_type auth_type,
2415                                 enum pipe_auth_level auth_level,
2416                                 DATA_BLOB *pauth_blob,
2417                                 prs_struct *rpc_out)
2418 {
2419         RPC_HDR hdr;
2420         RPC_HDR_AUTH hdr_auth;
2421         uint32 pad = 0;
2422
2423         /* Create the request RPC_HDR */
2424         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2425                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2426                      pauth_blob->length );
2427
2428         /* Marshall it. */
2429         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2430                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2431                 return NT_STATUS_NO_MEMORY;
2432         }
2433
2434         /*
2435                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2436                 about padding - shouldn't this pad to length 8 ? JRA.
2437         */
2438
2439         /* 4 bytes padding. */
2440         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2441                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2442                 return NT_STATUS_NO_MEMORY;
2443         }
2444
2445         /* Create the request RPC_HDR_AUTH */
2446         init_rpc_hdr_auth(&hdr_auth,
2447                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2448                         auth_level, 0, 1);
2449
2450         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2451                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2452                 return NT_STATUS_NO_MEMORY;
2453         }
2454
2455         /*
2456          * Append the auth data to the outgoing buffer.
2457          */
2458
2459         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2460                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2461                 return NT_STATUS_NO_MEMORY;
2462         }
2463
2464         return NT_STATUS_OK;
2465 }
2466
2467 /*******************************************************************
2468  Creates a DCE/RPC bind alter context authentication request which
2469  may contain a spnego auth blobl
2470  ********************************************************************/
2471
2472 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2473                                         const RPC_IFACE *abstract,
2474                                         const RPC_IFACE *transfer,
2475                                         enum pipe_auth_level auth_level,
2476                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2477                                         prs_struct *rpc_out)
2478 {
2479         RPC_HDR_AUTH hdr_auth;
2480         prs_struct auth_info;
2481         NTSTATUS ret = NT_STATUS_OK;
2482
2483         ZERO_STRUCT(hdr_auth);
2484         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2485                 return NT_STATUS_NO_MEMORY;
2486
2487         /* We may change the pad length before marshalling. */
2488         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2489
2490         if (pauth_blob->length) {
2491                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2492                         prs_mem_free(&auth_info);
2493                         return NT_STATUS_NO_MEMORY;
2494                 }
2495         }
2496
2497         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2498                                                 rpc_out, 
2499                                                 rpc_call_id,
2500                                                 abstract,
2501                                                 transfer,
2502                                                 &hdr_auth,
2503                                                 &auth_info);
2504         prs_mem_free(&auth_info);
2505         return ret;
2506 }
2507
2508 /****************************************************************************
2509  Do an rpc bind.
2510 ****************************************************************************/
2511
2512 struct rpc_pipe_bind_state {
2513         struct event_context *ev;
2514         struct rpc_pipe_client *cli;
2515         prs_struct rpc_out;
2516         uint32_t rpc_call_id;
2517 };
2518
2519 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2520 {
2521         prs_mem_free(&state->rpc_out);
2522         return 0;
2523 }
2524
2525 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2526 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2527                                            struct rpc_pipe_bind_state *state,
2528                                            struct rpc_hdr_info *phdr,
2529                                            prs_struct *reply_pdu);
2530 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2531 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2532                                                     struct rpc_pipe_bind_state *state,
2533                                                     struct rpc_hdr_info *phdr,
2534                                                     prs_struct *reply_pdu);
2535 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2536
2537 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2538                                      struct event_context *ev,
2539                                      struct rpc_pipe_client *cli,
2540                                      struct cli_pipe_auth_data *auth)
2541 {
2542         struct async_req *result, *subreq;
2543         struct rpc_pipe_bind_state *state;
2544         NTSTATUS status;
2545
2546         if (!async_req_setup(mem_ctx, &result, &state,
2547                              struct rpc_pipe_bind_state)) {
2548                 return NULL;
2549         }
2550
2551         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2552                 rpccli_pipe_txt(debug_ctx(), cli),
2553                 (unsigned int)auth->auth_type,
2554                 (unsigned int)auth->auth_level ));
2555
2556         state->ev = ev;
2557         state->cli = cli;
2558         state->rpc_call_id = get_rpc_call_id();
2559
2560         prs_init_empty(&state->rpc_out, state, MARSHALL);
2561         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2562
2563         cli->auth = talloc_move(cli, &auth);
2564
2565         /* Marshall the outgoing data. */
2566         status = create_rpc_bind_req(cli, &state->rpc_out,
2567                                      state->rpc_call_id,
2568                                      &cli->abstract_syntax,
2569                                      &cli->transfer_syntax,
2570                                      cli->auth->auth_type,
2571                                      cli->auth->auth_level);
2572
2573         if (!NT_STATUS_IS_OK(status)) {
2574                 goto post_status;
2575         }
2576
2577         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2578                                    RPC_BINDACK);
2579         if (subreq == NULL) {
2580                 status = NT_STATUS_NO_MEMORY;
2581                 goto post_status;
2582         }
2583         subreq->async.fn = rpc_pipe_bind_step_one_done;
2584         subreq->async.priv = result;
2585         return result;
2586
2587  post_status:
2588         if (async_post_ntstatus(result, ev, status)) {
2589                 return result;
2590         }
2591         TALLOC_FREE(result);
2592         return NULL;
2593 }
2594
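     /*******************************************************************
      Handle the BIND-ACK: unmarshall and sanity-check it, record the
      negotiated fragment sizes and, depending on the auth type, either
      complete the bind or continue with the AUTH3 / alter context legs.
      ********************************************************************/
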
2595 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2596 {
2597         struct async_req *req = talloc_get_type_abort(
2598                 subreq->async.priv, struct async_req);
2599         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2600                 req->private_data, struct rpc_pipe_bind_state);
2601         prs_struct reply_pdu;
2602         struct rpc_hdr_info hdr;
2603         struct rpc_hdr_ba_info hdr_ba;
2604         NTSTATUS status;
2605
2606         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2607         TALLOC_FREE(subreq);
2608         if (!NT_STATUS_IS_OK(status)) {
2609                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2610                           rpccli_pipe_txt(debug_ctx(), state->cli),
2611                           nt_errstr(status)));
2612                 async_req_nterror(req, status);
2613                 return;
2614         }
2615
2616         /* Unmarshall the RPC header */
2617         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2618                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2619                 prs_mem_free(&reply_pdu);
2620                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2621                 return;
2622         }
2623
2624         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2625                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2626                           "RPC_HDR_BA.\n"));
2627                 prs_mem_free(&reply_pdu);
2628                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2629                 return;
2630         }
2631
2632         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2633                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2634                 prs_mem_free(&reply_pdu);
2635                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2636                 return;
2637         }
2638
2639         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2640         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2641
2642         /*
2643          * For authenticated binds we may need to do 3 or 4 leg binds.
2644          */
2645
2646         switch(state->cli->auth->auth_type) {
2647
2648         case PIPE_AUTH_TYPE_NONE:
2649         case PIPE_AUTH_TYPE_SCHANNEL:
2650                 /* Bind complete. */
2651                 prs_mem_free(&reply_pdu);
2652                 async_req_done(req);
2653                 break;
2654
2655         case PIPE_AUTH_TYPE_NTLMSSP:
2656                 /* Need to send AUTH3 packet - no reply. */
2657                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2658                                                     &reply_pdu);
2659                 prs_mem_free(&reply_pdu);
2660                 if (!NT_STATUS_IS_OK(status)) {
2661                         async_req_nterror(req, status);
2662                 }
2663                 break;
2664
2665         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2666                 /* Need to send alter context request and reply. */
2667                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2668                                                              &reply_pdu);
2669                 prs_mem_free(&reply_pdu);
2670                 if (!NT_STATUS_IS_OK(status)) {
2671                         async_req_nterror(req, status);
2672                 }
2673                 break;
2674
2675         case PIPE_AUTH_TYPE_KRB5:
2676                 /* KRB5 bind completion is not implemented here yet -
2677                  * fall through and report an error. */
2678         default:
2679                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2680                          (unsigned int)state->cli->auth->auth_type));
2681                 prs_mem_free(&reply_pdu);
2682                 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2683         }
2684 }
2685
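/****************************************************************************
 Third leg of an NTLMSSP bind: feed the server's challenge from the bind_ack
 into ntlmssp_update() and send the resulting AUTH3 PDU. No reply is
 expected; rpc_bind_auth3_write_done completes the request.
 ****************************************************************************/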
2686 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2687                                            struct rpc_pipe_bind_state *state,
2688                                            struct rpc_hdr_info *phdr,
2689                                            prs_struct *reply_pdu)
2690 {
2691         DATA_BLOB server_response = data_blob_null;
2692         DATA_BLOB client_reply = data_blob_null;
2693         struct rpc_hdr_auth_info hdr_auth;
2694         struct async_req *subreq;
2695         NTSTATUS status;
2696
2697         if ((phdr->auth_len == 0)
2698             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2699                 return NT_STATUS_INVALID_PARAMETER;
2700         }
2701
2702         if (!prs_set_offset(
2703                     reply_pdu,
2704                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2705                 return NT_STATUS_INVALID_PARAMETER;
2706         }
2707
2708         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2709                 return NT_STATUS_INVALID_PARAMETER;
2710         }
2711
2712         /* TODO - check auth_type/auth_level match. */
2713
2714         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2715         prs_copy_data_out((char *)server_response.data, reply_pdu,
2716                           phdr->auth_len);
2717
2718         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2719                                 server_response, &client_reply);
2720
2721         if (!NT_STATUS_IS_OK(status)) {
2722                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2723                           "blob failed: %s.\n", nt_errstr(status)));
2724                 return status;
2725         }
2726
2727         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2728
2729         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2730                                        state->cli->auth->auth_type,
2731                                        state->cli->auth->auth_level,
2732                                        &client_reply, &state->rpc_out);
2733         data_blob_free(&client_reply);
2734
2735         if (!NT_STATUS_IS_OK(status)) {
2736                 return status;
2737         }
2738
2739         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2740                                 (uint8_t *)prs_data_p(&state->rpc_out),
2741                                 prs_offset(&state->rpc_out));
2742         if (subreq == NULL) {
2743                 return NT_STATUS_NO_MEMORY;
2744         }
2745         subreq->async.fn = rpc_bind_auth3_write_done;
2746         subreq->async.priv = req;
2747         return NT_STATUS_OK;
2748 }
2749
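/****************************************************************************
 The AUTH3 PDU has been written - the bind is complete once the write has
 succeeded.
 ****************************************************************************/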
2750 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2751 {
2752         struct async_req *req = talloc_get_type_abort(
2753                 subreq->async.priv, struct async_req);
2754         NTSTATUS status;
2755
2756         status = rpc_write_recv(subreq);
2757         TALLOC_FREE(subreq);
2758         if (!NT_STATUS_IS_OK(status)) {
2759                 async_req_nterror(req, status);
2760                 return;
2761         }
2762         async_req_done(req);
2763 }
2764
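/****************************************************************************
 Fourth leg of a SPNEGO-NTLMSSP bind: unwrap the SPNEGO challenge from the
 bind_ack, run it through ntlmssp_update(), re-wrap the reply in SPNEGO and
 send it as an alter context request. rpc_bind_ntlmssp_api_done handles the
 alter context response.
 ****************************************************************************/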
2765 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2766                                                     struct rpc_pipe_bind_state *state,
2767                                                     struct rpc_hdr_info *phdr,
2768                                                     prs_struct *reply_pdu)
2769 {
2770         DATA_BLOB server_spnego_response = data_blob_null;
2771         DATA_BLOB server_ntlm_response = data_blob_null;
2772         DATA_BLOB client_reply = data_blob_null;
2773         DATA_BLOB tmp_blob = data_blob_null;
2774         RPC_HDR_AUTH hdr_auth;
2775         struct async_req *subreq;
2776         NTSTATUS status;
2777
2778         if ((phdr->auth_len == 0)
2779             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2780                 return NT_STATUS_INVALID_PARAMETER;
2781         }
2782
2783         /* Process the returned NTLMSSP blob first. */
2784         if (!prs_set_offset(
2785                     reply_pdu,
2786                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2787                 return NT_STATUS_INVALID_PARAMETER;
2788         }
2789
2790         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2791                 return NT_STATUS_INVALID_PARAMETER;
2792         }
2793
2794         server_spnego_response = data_blob(NULL, phdr->auth_len);
2795         prs_copy_data_out((char *)server_spnego_response.data,
2796                           reply_pdu, phdr->auth_len);
2797
2798         /*
2799          * The server might give us back two challenges - tmp_blob is for the
2800          * second.
2801          */
2802         if (!spnego_parse_challenge(server_spnego_response,
2803                                     &server_ntlm_response, &tmp_blob)) {
2804                 data_blob_free(&server_spnego_response);
2805                 data_blob_free(&server_ntlm_response);
2806                 data_blob_free(&tmp_blob);
2807                 return NT_STATUS_INVALID_PARAMETER;
2808         }
2809
2810         /* We're finished with the server spnego response and the tmp_blob. */
2811         data_blob_free(&server_spnego_response);
2812         data_blob_free(&tmp_blob);
2813
2814         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2815                                 server_ntlm_response, &client_reply);
2816
2817         /* Finished with the server_ntlm response */
2818         data_blob_free(&server_ntlm_response);
2819
2820         if (!NT_STATUS_IS_OK(status)) {
2821                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2822                           "using server blob failed: %s.\n", nt_errstr(status)));
2823                 data_blob_free(&client_reply);
2824                 return status;
2825         }
2826
2827         /* SPNEGO wrap the client reply. */
2828         tmp_blob = spnego_gen_auth(client_reply);
2829         data_blob_free(&client_reply);
2830         client_reply = tmp_blob;
2831         tmp_blob = data_blob_null;
2832
2833         /* Now prepare the alter context pdu. */
2834         prs_init_empty(&state->rpc_out, state, MARSHALL);
2835
2836         status = create_rpc_alter_context(state->rpc_call_id,
2837                                           &state->cli->abstract_syntax,
2838                                           &state->cli->transfer_syntax,
2839                                           state->cli->auth->auth_level,
2840                                           &client_reply,
2841                                           &state->rpc_out);
2842         data_blob_free(&client_reply);
2843
2844         if (!NT_STATUS_IS_OK(status)) {
2845                 return status;
2846         }
2847
2848         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2849                                    &state->rpc_out, RPC_ALTCONTRESP);
2850         if (subreq == NULL) {
2851                 return NT_STATUS_NO_MEMORY;
2852         }
2853         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2854         subreq->async.priv = req;
2855         return NT_STATUS_OK;
2856 }
2857
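/****************************************************************************
 Handle the alter context response: extract the SPNEGO auth blob and verify
 that the server accepted our final NTLMSSP token.
 ****************************************************************************/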
2858 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2859 {
2860         struct async_req *req = talloc_get_type_abort(
2861                 subreq->async.priv, struct async_req);
2862         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2863                 req->private_data, struct rpc_pipe_bind_state);
2864         DATA_BLOB server_spnego_response = data_blob_null;
2865         DATA_BLOB tmp_blob = data_blob_null;
2866         prs_struct reply_pdu;
2867         struct rpc_hdr_info hdr;
2868         struct rpc_hdr_auth_info hdr_auth;
2869         NTSTATUS status;
2870
2871         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2872         TALLOC_FREE(subreq);
2873         if (!NT_STATUS_IS_OK(status)) {
2874                 async_req_nterror(req, status);
2875                 return;
2876         }
2877
2878         /* Get the auth blob from the reply. */
2879         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2880                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2881                           "unmarshall RPC_HDR.\n"));
2882                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2883                 return;
2884         }
2885
2886         if (!prs_set_offset(
2887                     &reply_pdu,
2888                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2889                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2890                 return;
2891         }
2892
2893         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2894                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2895                 return;
2896         }
2897
2898         server_spnego_response = data_blob(NULL, hdr.auth_len);
2899         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2900                           hdr.auth_len);
2901
2902         /* Check we got a valid auth response. */
2903         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2904                                         OID_NTLMSSP, &tmp_blob)) {
2905                 data_blob_free(&server_spnego_response);
2906                 data_blob_free(&tmp_blob);
2907                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2908                 return;
2909         }
2910
2911         data_blob_free(&server_spnego_response);
2912         data_blob_free(&tmp_blob);
2913
2914         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2915                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2916         async_req_done(req);
2917 }
2918
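/****************************************************************************
 Receive the result of an async rpc_pipe_bind_send request.
 ****************************************************************************/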
2919 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2920 {
2921         return async_req_simple_recv_ntstatus(req);
2922 }
2923
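/****************************************************************************
 Synchronous wrapper around rpc_pipe_bind_send/_recv, driving a private
 event context until the bind has completed.
 ****************************************************************************/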
2924 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2925                        struct cli_pipe_auth_data *auth)
2926 {
2927         TALLOC_CTX *frame = talloc_stackframe();
2928         struct event_context *ev;
2929         struct async_req *req;
2930         NTSTATUS status = NT_STATUS_NO_MEMORY;
2931
2932         ev = event_context_init(frame);
2933         if (ev == NULL) {
2934                 goto fail;
2935         }
2936
2937         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2938         if (req == NULL) {
2939                 goto fail;
2940         }
2941
2942         while (req->state < ASYNC_REQ_DONE) {
2943                 event_loop_once(ev);
2944         }
2945
2946         status = rpc_pipe_bind_recv(req);
2947  fail:
2948         TALLOC_FREE(frame);
2949         return status;
2950 }
2951
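/****************************************************************************
 Set the timeout on the underlying SMB connection of a named pipe. Returns
 the old timeout, or 0 if the pipe does not run over SMB.
 ****************************************************************************/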
2952 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2953                                 unsigned int timeout)
2954 {
2955         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2956
2957         if (cli == NULL) {
2958                 return 0;
2959         }
2960         return cli_set_timeout(cli, timeout);
2961 }
2962
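/****************************************************************************
 Fetch the NT password hash used on this connection - from the NTLMSSP auth
 state if present, otherwise from the SMB connection's credentials.
 ****************************************************************************/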
2963 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2964 {
2965         struct cli_state *cli;
2966
2967         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2968             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2969                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2970                 return true;
2971         }
2972
2973         cli = rpc_pipe_np_smb_conn(rpc_cli);
2974         if (cli == NULL) {
2975                 return false;
2976         }
2977         E_md4hash(cli->pwd.password, nt_hash);
2978         return true;
2979 }
2980
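/****************************************************************************
 Create the auth data for an anonymous bind.
 ****************************************************************************/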
2981 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2982                                struct cli_pipe_auth_data **presult)
2983 {
2984         struct cli_pipe_auth_data *result;
2985
2986         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2987         if (result == NULL) {
2988                 return NT_STATUS_NO_MEMORY;
2989         }
2990
2991         result->auth_type = PIPE_AUTH_TYPE_NONE;
2992         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2993
2994         result->user_name = talloc_strdup(result, "");
2995         result->domain = talloc_strdup(result, "");
2996         if ((result->user_name == NULL) || (result->domain == NULL)) {
2997                 TALLOC_FREE(result);
2998                 return NT_STATUS_NO_MEMORY;
2999         }
3000
3001         *presult = result;
3002         return NT_STATUS_OK;
3003 }
3004
3005 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3006 {
3007         ntlmssp_end(&auth->a_u.ntlmssp_state);
3008         return 0;
3009 }
3010
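/****************************************************************************
 Create the auth data for an NTLMSSP or SPNEGO-NTLMSSP bind, initialising
 the client side NTLMSSP state with the given domain, username and password
 and the sign/seal flags implied by the auth level.
 ****************************************************************************/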
3011 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3012                                   enum pipe_auth_type auth_type,
3013                                   enum pipe_auth_level auth_level,
3014                                   const char *domain,
3015                                   const char *username,
3016                                   const char *password,
3017                                   struct cli_pipe_auth_data **presult)
3018 {
3019         struct cli_pipe_auth_data *result;
3020         NTSTATUS status;
3021
3022         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3023         if (result == NULL) {
3024                 return NT_STATUS_NO_MEMORY;
3025         }
3026
3027         result->auth_type = auth_type;
3028         result->auth_level = auth_level;
3029
3030         result->user_name = talloc_strdup(result, username);
3031         result->domain = talloc_strdup(result, domain);
3032         if ((result->user_name == NULL) || (result->domain == NULL)) {
3033                 status = NT_STATUS_NO_MEMORY;
3034                 goto fail;
3035         }
3036
3037         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3038         if (!NT_STATUS_IS_OK(status)) {
3039                 goto fail;
3040         }
3041
3042         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3043
3044         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3045         if (!NT_STATUS_IS_OK(status)) {
3046                 goto fail;
3047         }
3048
3049         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3050         if (!NT_STATUS_IS_OK(status)) {
3051                 goto fail;
3052         }
3053
3054         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3055         if (!NT_STATUS_IS_OK(status)) {
3056                 goto fail;
3057         }
3058
3059         /*
3060          * Turn off sign+seal to allow selected auth level to turn it back on.
3061          */
3062         result->a_u.ntlmssp_state->neg_flags &=
3063                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3064
3065         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3066                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3067         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3068                 result->a_u.ntlmssp_state->neg_flags
3069                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3070         }
3071
3072         *presult = result;
3073         return NT_STATUS_OK;
3074
3075  fail:
3076         TALLOC_FREE(result);
3077         return status;
3078 }
3079
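/****************************************************************************
 Create the auth data for a schannel bind from an existing 16 byte session
 key.
 ****************************************************************************/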
3080 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3081                                    enum pipe_auth_level auth_level,
3082                                    const uint8_t sess_key[16],
3083                                    struct cli_pipe_auth_data **presult)
3084 {
3085         struct cli_pipe_auth_data *result;
3086
3087         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3088         if (result == NULL) {
3089                 return NT_STATUS_NO_MEMORY;
3090         }
3091
3092         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3093         result->auth_level = auth_level;
3094
3095         result->user_name = talloc_strdup(result, "");
3096         result->domain = talloc_strdup(result, domain);
3097         if ((result->user_name == NULL) || (result->domain == NULL)) {
3098                 goto fail;
3099         }
3100
3101         result->a_u.schannel_auth = talloc(result,
3102                                            struct schannel_auth_struct);
3103         if (result->a_u.schannel_auth == NULL) {
3104                 goto fail;
3105         }
3106
3107         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3108                sizeof(result->a_u.schannel_auth->sess_key));
3109         result->a_u.schannel_auth->seq_num = 0;
3110
3111         *presult = result;
3112         return NT_STATUS_OK;
3113
3114  fail:
3115         TALLOC_FREE(result);
3116         return NT_STATUS_NO_MEMORY;
3117 }
3118
3119 #ifdef HAVE_KRB5
3120 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3121 {
3122         data_blob_free(&auth->session_key);
3123         return 0;
3124 }
3125 #endif
3126
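/****************************************************************************
 Create the auth data for a kerberos bind. If username and password are
 supplied a kinit is done first; otherwise an existing ticket cache is used.
 ****************************************************************************/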
3127 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3128                                    enum pipe_auth_level auth_level,
3129                                    const char *service_princ,
3130                                    const char *username,
3131                                    const char *password,
3132                                    struct cli_pipe_auth_data **presult)
3133 {
3134 #ifdef HAVE_KRB5
3135         struct cli_pipe_auth_data *result;
3136
3137         if ((username != NULL) && (password != NULL)) {
3138                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3139                 if (ret != 0) {
3140                         return NT_STATUS_ACCESS_DENIED;
3141                 }
3142         }
3143
3144         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3145         if (result == NULL) {
3146                 return NT_STATUS_NO_MEMORY;
3147         }
3148
3149         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3150         result->auth_level = auth_level;
3151
3152         /*
3153          * Username / domain need fixing!
3154          */
3155         result->user_name = talloc_strdup(result, "");
3156         result->domain = talloc_strdup(result, "");
3157         if ((result->user_name == NULL) || (result->domain == NULL)) {
3158                 goto fail;
3159         }
3160
3161         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3162                 result, struct kerberos_auth_struct);
3163         if (result->a_u.kerberos_auth == NULL) {
3164                 goto fail;
3165         }
3166         talloc_set_destructor(result->a_u.kerberos_auth,
3167                               cli_auth_kerberos_data_destructor);
3168
3169         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3170                 result, service_princ);
3171         if (result->a_u.kerberos_auth->service_principal == NULL) {
3172                 goto fail;
3173         }
3174
3175         *presult = result;
3176         return NT_STATUS_OK;
3177
3178  fail:
3179         TALLOC_FREE(result);
3180         return NT_STATUS_NO_MEMORY;
3181 #else
3182         return NT_STATUS_NOT_SUPPORTED;
3183 #endif
3184 }
3185
3186 /**
3187  * Create an rpc pipe client struct, connecting to a tcp port.
3188  */
3189 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3190                                        uint16_t port,
3191                                        const struct ndr_syntax_id *abstract_syntax,
3192                                        struct rpc_pipe_client **presult)
3193 {
3194         struct rpc_pipe_client *result;
3195         struct sockaddr_storage addr;
3196         NTSTATUS status;
3197         int fd;
3198
3199         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3200         if (result == NULL) {
3201                 return NT_STATUS_NO_MEMORY;
3202         }
3203
3204         result->abstract_syntax = *abstract_syntax;
3205         result->transfer_syntax = ndr_transfer_syntax;
3206         result->dispatch = cli_do_rpc_ndr;
3207
3208         result->desthost = talloc_strdup(result, host);
3209         result->srv_name_slash = talloc_asprintf_strupper_m(
3210                 result, "\\\\%s", result->desthost);
3211         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3212                 status = NT_STATUS_NO_MEMORY;
3213                 goto fail;
3214         }
3215
3216         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3217         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3218
3219         if (!resolve_name(host, &addr, 0)) {
3220                 status = NT_STATUS_NOT_FOUND;
3221                 goto fail;
3222         }
3223
3224         status = open_socket_out(&addr, port, 60, &fd);
3225         if (!NT_STATUS_IS_OK(status)) {
3226                 goto fail;
3227         }
3228         set_socket_options(fd, lp_socket_options());
3229
3230         status = rpc_transport_sock_init(result, fd, &result->transport);
3231         if (!NT_STATUS_IS_OK(status)) {
3232                 close(fd);
3233                 goto fail;
3234         }
3235
3236         *presult = result;
3237         return NT_STATUS_OK;
3238
3239  fail:
3240         TALLOC_FREE(result);
3241         return status;
3242 }
3243
3244 /**
3245  * Determine the tcp port on which a dcerpc interface is listening
3246  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3247  * target host.
3248  */
3249 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3250                                       const struct ndr_syntax_id *abstract_syntax,
3251                                       uint16_t *pport)
3252 {
3253         NTSTATUS status;
3254         struct rpc_pipe_client *epm_pipe = NULL;
3255         struct cli_pipe_auth_data *auth = NULL;
3256         struct dcerpc_binding *map_binding = NULL;
3257         struct dcerpc_binding *res_binding = NULL;
3258         struct epm_twr_t *map_tower = NULL;
3259         struct epm_twr_t *res_towers = NULL;
3260         struct policy_handle *entry_handle = NULL;
3261         uint32_t num_towers = 0;
3262         uint32_t max_towers = 1;
3263         struct epm_twr_p_t towers;
3264         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3265
3266         if (pport == NULL) {
3267                 status = NT_STATUS_INVALID_PARAMETER;
3268                 goto done;
3269         }
3270
3271         /* open the connection to the endpoint mapper */
3272         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3273                                         &ndr_table_epmapper.syntax_id,
3274                                         &epm_pipe);
3275
3276         if (!NT_STATUS_IS_OK(status)) {
3277                 goto done;
3278         }
3279
3280         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3281         if (!NT_STATUS_IS_OK(status)) {
3282                 goto done;
3283         }
3284
3285         status = rpc_pipe_bind(epm_pipe, auth);
3286         if (!NT_STATUS_IS_OK(status)) {
3287                 goto done;
3288         }
3289
3290         /* create tower for asking the epmapper */
3291
3292         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3293         if (map_binding == NULL) {
3294                 status = NT_STATUS_NO_MEMORY;
3295                 goto done;
3296         }
3297
3298         map_binding->transport = NCACN_IP_TCP;
3299         map_binding->object = *abstract_syntax;
3300         map_binding->host = host; /* needed? */
3301         map_binding->endpoint = "0"; /* correct? needed? */
3302
3303         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3304         if (map_tower == NULL) {
3305                 status = NT_STATUS_NO_MEMORY;
3306                 goto done;
3307         }
3308
3309         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3310                                             &(map_tower->tower));
3311         if (!NT_STATUS_IS_OK(status)) {
3312                 goto done;
3313         }
3314
3315         /* allocate further parameters for the epm_Map call */
3316
3317         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3318         if (res_towers == NULL) {
3319                 status = NT_STATUS_NO_MEMORY;
3320                 goto done;
3321         }
3322         towers.twr = res_towers;
3323
3324         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3325         if (entry_handle == NULL) {
3326                 status = NT_STATUS_NO_MEMORY;
3327                 goto done;
3328         }
3329
3330         /* ask the endpoint mapper for the port */
3331
3332         status = rpccli_epm_Map(epm_pipe,
3333                                 tmp_ctx,
3334                                 CONST_DISCARD(struct GUID *,
3335                                               &(abstract_syntax->uuid)),
3336                                 map_tower,
3337                                 entry_handle,
3338                                 max_towers,
3339                                 &num_towers,
3340                                 &towers);
3341
3342         if (!NT_STATUS_IS_OK(status)) {
3343                 goto done;
3344         }
3345
3346         if (num_towers != 1) {
3347                 status = NT_STATUS_UNSUCCESSFUL;
3348                 goto done;
3349         }
3350
3351         /* extract the port from the answer */
3352
3353         status = dcerpc_binding_from_tower(tmp_ctx,
3354                                            &(towers.twr->tower),
3355                                            &res_binding);
3356         if (!NT_STATUS_IS_OK(status)) {
3357                 goto done;
3358         }
3359
3360         /* are further checks here necessary? */
3361         if (res_binding->transport != NCACN_IP_TCP) {
3362                 status = NT_STATUS_UNSUCCESSFUL;
3363                 goto done;
3364         }
3365
3366         *pport = (uint16_t)atoi(res_binding->endpoint);
3367
3368 done:
3369         TALLOC_FREE(tmp_ctx);
3370         return status;
3371 }
3372
3373 /**
3374  * Create a rpc pipe client struct, connecting to a host via tcp.
3375  * The port is determined by asking the endpoint mapper on the given
3376  * host.
3377  */
3378 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3379                            const struct ndr_syntax_id *abstract_syntax,
3380                            struct rpc_pipe_client **presult)
3381 {
3382         NTSTATUS status;
3383         uint16_t port = 0;
3384
3385         *presult = NULL;
3386
3387         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3388         if (!NT_STATUS_IS_OK(status)) {
3389                 goto done;
3390         }
3391
3392         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3393                                         abstract_syntax, presult);
3394
3395 done:
3396         return status;
3397 }
3398
3399 /********************************************************************
3400  Create a rpc pipe client struct, connecting to a unix domain socket
3401  ********************************************************************/
3402 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3403                                const struct ndr_syntax_id *abstract_syntax,
3404                                struct rpc_pipe_client **presult)
3405 {
3406         struct rpc_pipe_client *result;
3407         struct sockaddr_un addr;
3408         NTSTATUS status;
3409         int fd;
3410
3411         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3412         if (result == NULL) {
3413                 return NT_STATUS_NO_MEMORY;
3414         }
3415
3416         result->abstract_syntax = *abstract_syntax;
3417         result->transfer_syntax = ndr_transfer_syntax;
3418         result->dispatch = cli_do_rpc_ndr;
3419
3420         result->desthost = get_myname(result);
3421         result->srv_name_slash = talloc_asprintf_strupper_m(
3422                 result, "\\\\%s", result->desthost);
3423         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3424                 status = NT_STATUS_NO_MEMORY;
3425                 goto fail;
3426         }
3427
3428         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3429         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3430
3431         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3432         if (fd == -1) {
3433                 status = map_nt_error_from_unix(errno);
3434                 goto fail;
3435         }
3436
3437         ZERO_STRUCT(addr);
3438         addr.sun_family = AF_UNIX;
3439         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3440
3441         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3442                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3443                           strerror(errno)));
3444                 /* Map errno before close() can overwrite it. */
3445                 status = map_nt_error_from_unix(errno);
3446                 close(fd);
3447                 goto fail;
3448         }
3447
3448         status = rpc_transport_sock_init(result, fd, &result->transport);
3449         if (!NT_STATUS_IS_OK(status)) {
3450                 close(fd);
3451                 goto fail;
3452         }
3453
3454         *presult = result;
3455         return NT_STATUS_OK;
3456
3457  fail:
3458         TALLOC_FREE(result);
3459         return status;
3460 }
3461
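/****************************************************************************
 Destructor for a named pipe opened over SMB: remove the pipe from the
 owning cli_state's pipe list.
 ****************************************************************************/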
3462 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3463 {
3464         struct cli_state *cli;
3465
3466         cli = rpc_pipe_np_smb_conn(p);
3467         if (cli != NULL) {
3468                 DLIST_REMOVE(cli->pipe_list, p);
3469         }
3470         return 0;
3471 }
3472
3473 /****************************************************************************
3474  Open a named pipe over SMB to a remote server.
3475  *
3476  * CAVEAT CALLER OF THIS FUNCTION:
3477  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3478  *    so be sure that this function is called AFTER any structure (vs pointer)
3479  *    assignment of the cli.  In particular, libsmbclient does structure
3480  *    assignments of cli, which invalidates the data in the returned
3481  *    rpc_pipe_client if this function is called before the structure assignment
3482  *    of cli.
3483  * 
3484  ****************************************************************************/
3485
3486 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3487                                  const struct ndr_syntax_id *abstract_syntax,
3488                                  struct rpc_pipe_client **presult)
3489 {
3490         struct rpc_pipe_client *result;
3491         NTSTATUS status;
3492
3493         /* sanity check to protect against crashes */
3494
3495         if ( !cli ) {
3496                 return NT_STATUS_INVALID_HANDLE;
3497         }
3498
3499         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3500         if (result == NULL) {
3501                 return NT_STATUS_NO_MEMORY;
3502         }
3503
3504         result->abstract_syntax = *abstract_syntax;
3505         result->transfer_syntax = ndr_transfer_syntax;
3506         result->dispatch = cli_do_rpc_ndr;
3507         result->desthost = talloc_strdup(result, cli->desthost);
3508         result->srv_name_slash = talloc_asprintf_strupper_m(
3509                 result, "\\\\%s", result->desthost);
3510
3511         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3512         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3513
3514         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3515                 TALLOC_FREE(result);
3516                 return NT_STATUS_NO_MEMORY;
3517         }
3518
3519         status = rpc_transport_np_init(result, cli, abstract_syntax,
3520                                        &result->transport);
3521         if (!NT_STATUS_IS_OK(status)) {
3522                 TALLOC_FREE(result);
3523                 return status;
3524         }
3525
3526         DLIST_ADD(cli->pipe_list, result);
3527         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3528
3529         *presult = result;
3530         return NT_STATUS_OK;
3531 }
3532
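/****************************************************************************
 Create an rpc pipe client struct connected to a local smbd via an
 rpc_cli_smbd_conn transport, and bind it anonymously.
 ****************************************************************************/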
3533 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3534                              struct rpc_cli_smbd_conn *conn,
3535                              const struct ndr_syntax_id *syntax,
3536                              struct rpc_pipe_client **presult)
3537 {
3538         struct rpc_pipe_client *result;
3539         struct cli_pipe_auth_data *auth;
3540         NTSTATUS status;
3541
3542         result = talloc(mem_ctx, struct rpc_pipe_client);
3543         if (result == NULL) {
3544                 return NT_STATUS_NO_MEMORY;
3545         }
3546         result->abstract_syntax = *syntax;
3547         result->transfer_syntax = ndr_transfer_syntax;
3548         result->dispatch = cli_do_rpc_ndr;
3549         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3550         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3551
3552         result->desthost = talloc_strdup(result, global_myname());
3553         result->srv_name_slash = talloc_asprintf_strupper_m(
3554                 result, "\\\\%s", global_myname());
3555         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3556                 TALLOC_FREE(result);
3557                 return NT_STATUS_NO_MEMORY;
3558         }
3559
3560         status = rpc_transport_smbd_init(result, conn, syntax,
3561                                          &result->transport);
3562         if (!NT_STATUS_IS_OK(status)) {
3563                 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3564                           nt_errstr(status)));
3565                 TALLOC_FREE(result);
3566                 return status;
3567         }
3568
3569         status = rpccli_anon_bind_data(result, &auth);
3570         if (!NT_STATUS_IS_OK(status)) {
3571                 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3572                           nt_errstr(status)));
3573                 TALLOC_FREE(result);
3574                 return status;
3575         }
3576
3577         status = rpc_pipe_bind(result, auth);
3578         if (!NT_STATUS_IS_OK(status)) {
3579                 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3580                 TALLOC_FREE(result);
3581                 return status;
3582         }
3583
3584         *presult = result;
3585         return NT_STATUS_OK;
3586 }
3587
3588 /****************************************************************************
3589  Open a pipe to a remote server.
3590  ****************************************************************************/
3591
3592 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3593                                   const struct ndr_syntax_id *interface,
3594                                   struct rpc_pipe_client **presult)
3595 {
3596         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3597                 /*
3598                  * We should have a better way to figure out this drsuapi
3599                  * speciality...
3600                  */
3601                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3602                                          presult);
3603         }
3604
3605         return rpc_pipe_open_np(cli, interface, presult);
3606 }
3607
3608 /****************************************************************************
3609  Open a named pipe to an SMB server and bind anonymously.
3610  ****************************************************************************/
3611
3612 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3613                                   const struct ndr_syntax_id *interface,
3614                                   struct rpc_pipe_client **presult)
3615 {
3616         struct rpc_pipe_client *result;
3617         struct cli_pipe_auth_data *auth;
3618         NTSTATUS status;
3619
3620         status = cli_rpc_pipe_open(cli, interface, &result);
3621         if (!NT_STATUS_IS_OK(status)) {
3622                 return status;
3623         }
3624
3625         status = rpccli_anon_bind_data(result, &auth);
3626         if (!NT_STATUS_IS_OK(status)) {
3627                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3628                           nt_errstr(status)));
3629                 TALLOC_FREE(result);
3630                 return status;
3631         }
3632
3633         /*
3634          * This is a bit of an abstraction violation due to the fact that an
3635          * anonymous bind on an authenticated SMB inherits the user/domain
3636          * from the enclosing SMB creds
3637          */
3638
3639         TALLOC_FREE(auth->user_name);
3640         TALLOC_FREE(auth->domain);
3641
3642         auth->user_name = talloc_strdup(auth, cli->user_name);
3643         auth->domain = talloc_strdup(auth, cli->domain);
3644         auth->user_session_key = data_blob_talloc(auth,
3645                 cli->user_session_key.data,
3646                 cli->user_session_key.length);
3647
3648         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3649                 TALLOC_FREE(result);
3650                 return NT_STATUS_NO_MEMORY;
3651         }
3652
3653         status = rpc_pipe_bind(result, auth);
3654         if (!NT_STATUS_IS_OK(status)) {
3655                 int lvl = 0;
3656                 if (ndr_syntax_id_equal(interface,
3657                                         &ndr_table_dssetup.syntax_id)) {
3658                         /* non AD domains just don't have this pipe, avoid
3659                          * level 0 statement in that case - gd */
3660                         lvl = 3;
3661                 }
3662                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3663                             "%s failed with error %s\n",
3664                             get_pipe_name_from_iface(interface),
3665                             nt_errstr(status) ));
3666                 TALLOC_FREE(result);
3667                 return status;
3668         }
3669
3670         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3671                   "%s and bound anonymously.\n",
3672                   get_pipe_name_from_iface(interface), cli->desthost));
3673
3674         *presult = result;
3675         return NT_STATUS_OK;
3676 }
3677
3678 /****************************************************************************
3679  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3680  ****************************************************************************/
3681
3682 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3683                                                    const struct ndr_syntax_id *interface,
3684                                                    enum pipe_auth_type auth_type,
3685                                                    enum pipe_auth_level auth_level,
3686                                                    const char *domain,
3687                                                    const char *username,
3688                                                    const char *password,
3689                                                    struct rpc_pipe_client **presult)
3690 {
3691         struct rpc_pipe_client *result;
3692         struct cli_pipe_auth_data *auth;
3693         NTSTATUS status;
3694
3695         status = cli_rpc_pipe_open(cli, interface, &result);
3696         if (!NT_STATUS_IS_OK(status)) {
3697                 return status;
3698         }
3699
3700         status = rpccli_ntlmssp_bind_data(
3701                 result, auth_type, auth_level, domain, username,
3702                 cli->pwd.null_pwd ? NULL : password, &auth);
3703         if (!NT_STATUS_IS_OK(status)) {
3704                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3705                           nt_errstr(status)));
3706                 goto err;
3707         }
3708
3709         status = rpc_pipe_bind(result, auth);
3710         if (!NT_STATUS_IS_OK(status)) {
3711                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3712                         nt_errstr(status) ));
3713                 goto err;
3714         }
3715
3716         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3717                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3718                   get_pipe_name_from_iface(interface), cli->desthost, domain,
3719                   username ));
3720
3721         *presult = result;
3722         return NT_STATUS_OK;
3723
3724   err:
3725
3726         TALLOC_FREE(result);
3727         return status;
3728 }
3729
3730 /****************************************************************************
3731  External interface.
3732  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3733  ****************************************************************************/
3734
3735 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3736                                    const struct ndr_syntax_id *interface,
3737                                    enum pipe_auth_level auth_level,
3738                                    const char *domain,
3739                                    const char *username,
3740                                    const char *password,
3741                                    struct rpc_pipe_client **presult)
3742 {
3743         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3744                                                 interface,
3745                                                 PIPE_AUTH_TYPE_NTLMSSP,
3746                                                 auth_level,
3747                                                 domain,
3748                                                 username,
3749                                                 password,
3750                                                 presult);
3751 }
3752
3753 /****************************************************************************
3754  External interface.
3755  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3756  ****************************************************************************/
3757
3758 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3759                                           const struct ndr_syntax_id *interface,
3760                                           enum pipe_auth_level auth_level,
3761                                           const char *domain,
3762                                           const char *username,
3763                                           const char *password,
3764                                           struct rpc_pipe_client **presult)
3765 {
3766         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3767                                                 interface,
3768                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3769                                                 auth_level,
3770                                                 domain,
3771                                                 username,
3772                                                 password,
3773                                                 presult);
3774 }
3775
3776 /****************************************************************************
3777   Get the schannel session key out of an already opened netlogon pipe.
3778  ****************************************************************************/
3779 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3780                                                 struct cli_state *cli,
3781                                                 const char *domain,
3782                                                 uint32 *pneg_flags)
3783 {
3784         uint32 sec_chan_type = 0;
3785         unsigned char machine_pwd[16];
3786         const char *machine_account;
3787         NTSTATUS status;
3788
3789         /* Get the machine account credentials from secrets.tdb. */
3790         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3791                                &sec_chan_type))
3792         {
3793                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3794                         "trust account password for domain '%s'\n",
3795                         domain));
3796                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3797         }
3798
3799         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3800                                         cli->desthost, /* server name */
3801                                         domain,        /* domain */
3802                                         global_myname(), /* client name */
3803                                         machine_account, /* machine account name */
3804                                         machine_pwd,
3805                                         sec_chan_type,
3806                                         pneg_flags);
3807
3808         if (!NT_STATUS_IS_OK(status)) {
3809                 DEBUG(3, ("get_schannel_session_key_common: "
3810                           "rpccli_netlogon_setup_creds failed with result %s "
3811                           "to server %s, domain %s, machine account %s.\n",
3812                           nt_errstr(status), cli->desthost, domain,
3813                           machine_account ));
3814                 return status;
3815         }
3816
3817         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3818                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3819                         cli->desthost));
3820                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3821         }
3822
3823         return NT_STATUS_OK;
3824 }
3825
3826 /****************************************************************************
3827  Open a netlogon pipe and get the schannel session key.
3828  Now exposed to external callers.
3829  ****************************************************************************/
3830
3831
3832 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3833                                   const char *domain,
3834                                   uint32 *pneg_flags,
3835                                   struct rpc_pipe_client **presult)
3836 {
3837         struct rpc_pipe_client *netlogon_pipe = NULL;
3838         NTSTATUS status;
3839
3840         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3841                                           &netlogon_pipe);
3842         if (!NT_STATUS_IS_OK(status)) {
3843                 return status;
3844         }
3845
3846         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3847                                                  pneg_flags);
3848         if (!NT_STATUS_IS_OK(status)) {
3849                 TALLOC_FREE(netlogon_pipe);
3850                 return status;
3851         }
3852
3853         *presult = netlogon_pipe;
3854         return NT_STATUS_OK;
3855 }
3856
3857 /****************************************************************************
3858  External interface.
3859  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3860  using session_key. sign and seal.
3861  ****************************************************************************/
3862
3863 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3864                                              const struct ndr_syntax_id *interface,
3865                                              enum pipe_auth_level auth_level,
3866                                              const char *domain,
3867                                              const struct dcinfo *pdc,
3868                                              struct rpc_pipe_client **presult)
3869 {
3870         struct rpc_pipe_client *result;
3871         struct cli_pipe_auth_data *auth;
3872         NTSTATUS status;
3873
3874         status = cli_rpc_pipe_open(cli, interface, &result);
3875         if (!NT_STATUS_IS_OK(status)) {
3876                 return status;
3877         }
3878
3879         status = rpccli_schannel_bind_data(result, domain, auth_level,
3880                                            pdc->sess_key, &auth);
3881         if (!NT_STATUS_IS_OK(status)) {
3882                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3883                           nt_errstr(status)));
3884                 TALLOC_FREE(result);
3885                 return status;
3886         }
3887
3888         status = rpc_pipe_bind(result, auth);
3889         if (!NT_STATUS_IS_OK(status)) {
3890                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3891                           "cli_rpc_pipe_bind failed with error %s\n",
3892                           nt_errstr(status) ));
3893                 TALLOC_FREE(result);
3894                 return status;
3895         }
3896
3897         /*
3898          * The credentials on a new netlogon pipe are the ones we are passed
3899          * in - copy them over.
3900          */
3901         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3902         if (result->dc == NULL) {
3903                 DEBUG(0, ("talloc failed\n"));
3904                 TALLOC_FREE(result);
3905                 return NT_STATUS_NO_MEMORY;
3906         }
3907
3908         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3909                   "for domain %s and bound using schannel.\n",
3910                   get_pipe_name_from_iface(interface),
3911                   cli->desthost, domain ));
3912
3913         *presult = result;
3914         return NT_STATUS_OK;
3915 }
3916
3917 /****************************************************************************
3918  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3919  Fetch the session key ourselves using a temporary netlogon pipe. This
3920  version uses a SPNEGO-NTLMSSP authenticated netlogon pipe to get the key.
3921  ****************************************************************************/
3922
3923 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3924                                                       const char *domain,
3925                                                       const char *username,
3926                                                       const char *password,
3927                                                       uint32 *pneg_flags,
3928                                                       struct rpc_pipe_client **presult)
3929 {
3930         struct rpc_pipe_client *netlogon_pipe = NULL;
3931         NTSTATUS status;
3932
3933         status = cli_rpc_pipe_open_spnego_ntlmssp(
3934                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3935                 domain, username, password, &netlogon_pipe);
3936         if (!NT_STATUS_IS_OK(status)) {
3937                 return status;
3938         }
3939
3940         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3941                                                  pneg_flags);
3942         if (!NT_STATUS_IS_OK(status)) {
3943                 TALLOC_FREE(netlogon_pipe);
3944                 return status;
3945         }
3946
3947         *presult = netlogon_pipe;
3948         return NT_STATUS_OK;
3949 }
3950
3951 /****************************************************************************
3952  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3953  Fetch the session key ourselves using a temporary netlogon pipe. This version
3954  uses a SPNEGO-NTLMSSP bind to get the session key.
3955  ****************************************************************************/
3956
3957 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3958                                                  const struct ndr_syntax_id *interface,
3959                                                  enum pipe_auth_level auth_level,
3960                                                  const char *domain,
3961                                                  const char *username,
3962                                                  const char *password,
3963                                                  struct rpc_pipe_client **presult)
3964 {
3965         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3966         struct rpc_pipe_client *netlogon_pipe = NULL;
3967         struct rpc_pipe_client *result = NULL;
3968         NTSTATUS status;
3969
3970         status = get_schannel_session_key_auth_ntlmssp(
3971                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3972         if (!NT_STATUS_IS_OK(status)) {
3973                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3974                         "key from server %s for domain %s.\n",
3975                         cli->desthost, domain ));
3976                 return status;
3977         }
3978
3979         status = cli_rpc_pipe_open_schannel_with_key(
3980                 cli, interface, auth_level, domain, netlogon_pipe->dc,
3981                 &result);
3982
3983         /* Now we've bound using the session key we can close the netlogon pipe. */
3984         TALLOC_FREE(netlogon_pipe);
3985
3986         if (NT_STATUS_IS_OK(status)) {
3987                 *presult = result;
3988         }
3989         return status;
3990 }
3991
3992 /****************************************************************************
3993  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3994  Fetch the session key ourselves using a temporary netlogon pipe.
3995  ****************************************************************************/
3996
3997 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
3998                                     const struct ndr_syntax_id *interface,
3999                                     enum pipe_auth_level auth_level,
4000                                     const char *domain,
4001                                     struct rpc_pipe_client **presult)
4002 {
4003         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4004         struct rpc_pipe_client *netlogon_pipe = NULL;
4005         struct rpc_pipe_client *result = NULL;
4006         NTSTATUS status;
4007
4008         status = get_schannel_session_key(cli, domain, &neg_flags,
4009                                           &netlogon_pipe);
4010         if (!NT_STATUS_IS_OK(status)) {
4011                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4012                         "key from server %s for domain %s.\n",
4013                         cli->desthost, domain ));
4014                 return status;
4015         }
4016
4017         status = cli_rpc_pipe_open_schannel_with_key(
4018                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4019                 &result);
4020
4021         /* Now that we've bound using the session key we can close the netlogon pipe. */
4022         TALLOC_FREE(netlogon_pipe);
4023
4024         if (NT_STATUS_IS_OK(status)) {
4025                 *presult = result;
4026         }
4027
4028         return status;
4029 }
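
/*
 * Hedged usage sketch, not referenced elsewhere in this file: opening the
 * netlogon pipe with a schannel bind using credentials the caller already
 * holds (typically the machine account).  The wrapper name is a made-up
 * example; cli_rpc_pipe_open_schannel() and ndr_table_netlogon are the
 * only identifiers taken from the surrounding code.
 */

static NTSTATUS example_open_netlogon_schannel(struct cli_state *cli,
                                               const char *domain,
                                               struct rpc_pipe_client **netlogon_pipe)
{
        /* The temporary netlogon pipe used to negotiate the session key is
           handled internally; on success *netlogon_pipe comes back bound
           with schannel at privacy (seal) level. */
        return cli_rpc_pipe_open_schannel(cli,
                                          &ndr_table_netlogon.syntax_id,
                                          PIPE_AUTH_LEVEL_PRIVACY,
                                          domain,
                                          netlogon_pipe);
}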
4030
4031 /****************************************************************************
4032  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4033  The idea is that this can be called with service_princ, username and password
4034  all set to NULL, as long as the caller already holds a valid TGT.
4035  ****************************************************************************/
4036
4037 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4038                                 const struct ndr_syntax_id *interface,
4039                                 enum pipe_auth_level auth_level,
4040                                 const char *service_princ,
4041                                 const char *username,
4042                                 const char *password,
4043                                 struct rpc_pipe_client **presult)
4044 {
4045 #ifdef HAVE_KRB5
4046         struct rpc_pipe_client *result;
4047         struct cli_pipe_auth_data *auth;
4048         NTSTATUS status;
4049
4050         status = cli_rpc_pipe_open(cli, interface, &result);
4051         if (!NT_STATUS_IS_OK(status)) {
4052                 return status;
4053         }
4054
4055         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4056                                            username, password, &auth);
4057         if (!NT_STATUS_IS_OK(status)) {
4058                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4059                           nt_errstr(status)));
4060                 TALLOC_FREE(result);
4061                 return status;
4062         }
4063
4064         status = rpc_pipe_bind(result, auth);
4065         if (!NT_STATUS_IS_OK(status)) {
4066                 DEBUG(0, ("cli_rpc_pipe_open_krb5: rpc_pipe_bind failed "
4067                           "with error %s\n", nt_errstr(status)));
4068                 TALLOC_FREE(result);
4069                 return status;
4070         }
4071
4072         *presult = result;
4073         return NT_STATUS_OK;
4074 #else
4075         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4076         return NT_STATUS_NOT_IMPLEMENTED;
4077 #endif
4078 }
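
/*
 * Hedged usage sketch, illustrative only: binding the LSA pipe with krb5
 * authentication while relying on a TGT already present in the caller's
 * credential cache, i.e. with service_princ, username and password all
 * NULL as described above.  The wrapper name is an assumption for
 * illustration; cli_rpc_pipe_open_krb5() is the real entry point.
 */

static NTSTATUS example_open_lsarpc_krb5(struct cli_state *cli,
                                         struct rpc_pipe_client **lsa_pipe)
{
        /* NULL service_princ/username/password: fall back to the TGT in
           the credential cache. */
        return cli_rpc_pipe_open_krb5(cli,
                                      &ndr_table_lsarpc.syntax_id,
                                      PIPE_AUTH_LEVEL_PRIVACY,
                                      NULL,     /* service_princ */
                                      NULL,     /* username */
                                      NULL,     /* password */
                                      lsa_pipe);
}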
4079
4080 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4081                              struct rpc_pipe_client *cli,
4082                              DATA_BLOB *session_key)
4083 {
4084         if (!session_key || !cli) {
4085                 return NT_STATUS_INVALID_PARAMETER;
4086         }
4087
4088         if (!cli->auth) {
4089                 return NT_STATUS_INVALID_PARAMETER;
4090         }
4091
4092         switch (cli->auth->auth_type) {
4093                 case PIPE_AUTH_TYPE_SCHANNEL:
4094                         *session_key = data_blob_talloc(mem_ctx,
4095                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4096                         break;
4097                 case PIPE_AUTH_TYPE_NTLMSSP:
4098                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4099                         *session_key = data_blob_talloc(mem_ctx,
4100                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4101                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4102                         break;
4103                 case PIPE_AUTH_TYPE_KRB5:
4104                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4105                         *session_key = data_blob_talloc(mem_ctx,
4106                                 cli->auth->a_u.kerberos_auth->session_key.data,
4107                                 cli->auth->a_u.kerberos_auth->session_key.length);
4108                         break;
4109                 case PIPE_AUTH_TYPE_NONE:
4110                         *session_key = data_blob_talloc(mem_ctx,
4111                                 cli->auth->user_session_key.data,
4112                                 cli->auth->user_session_key.length);
4113                         break;
4114                 default:
4115                         return NT_STATUS_NO_USER_SESSION_KEY;
4116         }
4117
4118         return NT_STATUS_OK;
4119 }
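
/*
 * Hedged usage sketch: pulling the negotiated session key off an
 * already-bound pipe, e.g. before sealing a secret for a remote call.
 * talloc_tos() is assumed to be available as the usual Samba3 temporary
 * talloc context; the wrapper name is illustrative only.
 */

static NTSTATUS example_fetch_session_key(struct rpc_pipe_client *pipe_hnd)
{
        DATA_BLOB session_key;
        NTSTATUS status;

        status = cli_get_session_key(talloc_tos(), pipe_hnd, &session_key);
        if (!NT_STATUS_IS_OK(status)) {
                /* e.g. NT_STATUS_NO_USER_SESSION_KEY for an auth type
                   without a usable key. */
                return status;
        }

        DEBUG(10, ("example_fetch_session_key: got %u byte session key\n",
                   (unsigned int)session_key.length));

        data_blob_free(&session_key);
        return NT_STATUS_OK;
}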