Convert get_complete_frag to tevent_req
source3/rpc_client/cli_pipe.c (ira/wip.git)
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
54 static const struct pipe_id_info {
55         /* the names appear not to matter: the syntaxes _do_ matter */
56
57         const char *client_pipe;
58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
60 {
61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
68         { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
77         { NULL, NULL }
78 };
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
85 {
86         char *guid_str;
87         const char *result;
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
102         if (guid_str == NULL) {
103                 return NULL;
104         }
105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106                                  (int)interface->if_version);
107         TALLOC_FREE(guid_str);
108
109         if (result == NULL) {
110                 return "PIPE";
111         }
112         return result;
113 }
114
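For reference, a minimal usage sketch; the wrapper function, its name and the DEBUG level are illustrative assumptions, not code from this file:

static void example_print_pipe_name(void)
{
        /* Known interface: resolved via the pipe_names[] table above. */
        const char *name = get_pipe_name_from_iface(&ndr_table_samr.syntax_id);

        /* Unknown interfaces come back as a talloc'ed
         * "Interface <GUID>.<version>" string on talloc_tos(). */
        DEBUG(10, ("samr pipe name: %s\n", name));
}
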
115 /********************************************************************
116  Map internal value to wire value.
117  ********************************************************************/
118
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
120 {
121         switch (auth_type) {
122
123         case PIPE_AUTH_TYPE_NONE:
124                 return RPC_ANONYMOUS_AUTH_TYPE;
125
126         case PIPE_AUTH_TYPE_NTLMSSP:
127                 return RPC_NTLMSSP_AUTH_TYPE;
128
129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131                 return RPC_SPNEGO_AUTH_TYPE;
132
133         case PIPE_AUTH_TYPE_SCHANNEL:
134                 return RPC_SCHANNEL_AUTH_TYPE;
135
136         case PIPE_AUTH_TYPE_KRB5:
137                 return RPC_KRB5_AUTH_TYPE;
138
139         default:
140                 DEBUG(0,("map_pipe_auth_type_to_rpc_auth_type: unknown pipe "
141                         "auth type %u\n",
142                         (unsigned int)auth_type ));
143                 break;
144         }
145         return -1;
146 }
147
148 /********************************************************************
149  Pipe description for DEBUG messages.
150  ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152                                    struct rpc_pipe_client *cli)
153 {
154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155         if (result == NULL) {
156                 return "pipe";
157         }
158         return result;
159 }
160
161 /********************************************************************
162  Rpc pipe call id.
163  ********************************************************************/
164
165 static uint32 get_rpc_call_id(void)
166 {
167         static uint32 call_id = 0;
168         return ++call_id;
169 }
170
171 /*
172  * Realloc pdu to have at least "size" bytes
173  */
174
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
176 {
177         size_t extra_size;
178
179         if (prs_data_size(pdu) >= size) {
180                 return true;
181         }
182
183         extra_size = size - prs_data_size(pdu);
184
185         if (!prs_force_grow(pdu, extra_size)) {
186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187                           "%d bytes.\n", (int)extra_size));
188                 return false;
189         }
190
191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192                   (int)extra_size, prs_data_size(pdu)));
193         return true;
194 }
195
196
197 /*******************************************************************
198  Use SMBreadX to get the rest of one fragment's worth of rpc data.
199  Reads the whole size or gives an error message.
200  ********************************************************************/
201
202 struct rpc_read_state {
203         struct event_context *ev;
204         struct rpc_cli_transport *transport;
205         uint8_t *data;
206         size_t size;
207         size_t num_read;
208 };
209
210 static void rpc_read_done(struct async_req *subreq);
211
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213                                         struct event_context *ev,
214                                         struct rpc_cli_transport *transport,
215                                         uint8_t *data, size_t size)
216 {
217         struct tevent_req *req;
218         struct async_req *subreq;
219         struct rpc_read_state *state;
220
221         req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
222         if (req == NULL) {
223                 return NULL;
224         }
225         state->ev = ev;
226         state->transport = transport;
227         state->data = data;
228         state->size = size;
229         state->num_read = 0;
230
231         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
232
233         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
234                                       transport->priv);
235         if (subreq == NULL) {
236                 goto fail;
237         }
238         subreq->async.fn = rpc_read_done;
239         subreq->async.priv = req;
240         return req;
241
242  fail:
243         TALLOC_FREE(req);
244         return NULL;
245 }
246
247 static void rpc_read_done(struct async_req *subreq)
248 {
249         struct tevent_req *req = talloc_get_type_abort(
250                 subreq->async.priv, struct tevent_req);
251         struct rpc_read_state *state = tevent_req_data(
252                 req, struct rpc_read_state);
253         NTSTATUS status;
254         ssize_t received;
255
256         status = state->transport->read_recv(subreq, &received);
257         TALLOC_FREE(subreq);
258         if (!NT_STATUS_IS_OK(status)) {
259                 tevent_req_nterror(req, status);
260                 return;
261         }
262
263         state->num_read += received;
264         if (state->num_read == state->size) {
265                 tevent_req_done(req);
266                 return;
267         }
268
269         subreq = state->transport->read_send(state, state->ev,
270                                              state->data + state->num_read,
271                                              state->size - state->num_read,
272                                              state->transport->priv);
273         if (tevent_req_nomem(subreq, req)) {
274                 return;
275         }
276         subreq->async.fn = rpc_read_done;
277         subreq->async.priv = req;
278 }
279
280 static NTSTATUS rpc_read_recv(struct tevent_req *req)
281 {
282         return tevent_req_simple_recv_ntstatus(req);
283 }
284
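For completeness, here is a sketch of how a send/recv pair like this is typically driven synchronously; the wrapper name rpc_read_sync is hypothetical, and the example assumes talloc_stackframe(), event_context_init(), tevent_req_poll() and map_nt_error_from_unix() are available as they are elsewhere in source3:

static NTSTATUS rpc_read_sync(struct rpc_cli_transport *transport,
                              uint8_t *data, size_t size)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        /* Private event context, only used to drive this one request. */
        ev = event_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }
        req = rpc_read_send(frame, ev, transport, data, size);
        if (req == NULL) {
                goto fail;
        }
        /* Loop the event context until the request has completed. */
        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto fail;
        }
        status = rpc_read_recv(req);
 fail:
        TALLOC_FREE(frame);
        return status;
}
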
285 struct rpc_write_state {
286         struct event_context *ev;
287         struct rpc_cli_transport *transport;
288         const uint8_t *data;
289         size_t size;
290         size_t num_written;
291 };
292
293 static void rpc_write_done(struct async_req *subreq);
294
295 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
296                                          struct event_context *ev,
297                                          struct rpc_cli_transport *transport,
298                                          const uint8_t *data, size_t size)
299 {
300         struct tevent_req *req;
301         struct async_req *subreq;
302         struct rpc_write_state *state;
303
304         req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
305         if (req == NULL) {
306                 return NULL;
307         }
308         state->ev = ev;
309         state->transport = transport;
310         state->data = data;
311         state->size = size;
312         state->num_written = 0;
313
314         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
315
316         subreq = transport->write_send(state, ev, data, size, transport->priv);
317         if (subreq == NULL) {
318                 goto fail;
319         }
320         subreq->async.fn = rpc_write_done;
321         subreq->async.priv = req;
322         return req;
323  fail:
324         TALLOC_FREE(req);
325         return NULL;
326 }
327
328 static void rpc_write_done(struct async_req *subreq)
329 {
330         struct tevent_req *req = talloc_get_type_abort(
331                 subreq->async.priv, struct tevent_req);
332         struct rpc_write_state *state = tevent_req_data(
333                 req, struct rpc_write_state);
334         NTSTATUS status;
335         ssize_t written;
336
337         status = state->transport->write_recv(subreq, &written);
338         TALLOC_FREE(subreq);
339         if (!NT_STATUS_IS_OK(status)) {
340                 tevent_req_nterror(req, status);
341                 return;
342         }
343
344         state->num_written += written;
345
346         if (state->num_written == state->size) {
347                 tevent_req_done(req);
348                 return;
349         }
350
351         subreq = state->transport->write_send(state, state->ev,
352                                               state->data + state->num_written,
353                                               state->size - state->num_written,
354                                               state->transport->priv);
355         if (tevent_req_nomem(subreq, req)) {
356                 return;
357         }
358         subreq->async.fn = rpc_write_done;
359         subreq->async.priv = req;
360 }
361
362 static NTSTATUS rpc_write_recv(struct tevent_req *req)
363 {
364         return tevent_req_simple_recv_ntstatus(req);
365 }
366
367
368 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
369                                  struct rpc_hdr_info *prhdr,
370                                  prs_struct *pdu)
371 {
372         /*
373          * This next call sets the endian bit correctly in current_pdu. We
374          * will propagate this to rbuf later.
375          */
376
377         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
378                 DEBUG(0, ("parse_rpc_header: Failed to unmarshall RPC_HDR.\n"));
379                 return NT_STATUS_BUFFER_TOO_SMALL;
380         }
381
382         if (prhdr->frag_len > cli->max_recv_frag) {
383                 DEBUG(0, ("parse_rpc_header: Server sent fraglen %d,"
384                           " we only allow %d\n", (int)prhdr->frag_len,
385                           (int)cli->max_recv_frag));
386                 return NT_STATUS_BUFFER_TOO_SMALL;
387         }
388
389         return NT_STATUS_OK;
390 }
391
392 /****************************************************************************
393  Try to get a PDU's worth of data from current_pdu. If not, then read more
394  from the wire.
395  ****************************************************************************/
396
397 struct get_complete_frag_state {
398         struct event_context *ev;
399         struct rpc_pipe_client *cli;
400         struct rpc_hdr_info *prhdr;
401         prs_struct *pdu;
402 };
403
404 static void get_complete_frag_got_header(struct tevent_req *subreq);
405 static void get_complete_frag_got_rest(struct tevent_req *subreq);
406
407 static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
408                                                  struct event_context *ev,
409                                                  struct rpc_pipe_client *cli,
410                                                  struct rpc_hdr_info *prhdr,
411                                                  prs_struct *pdu)
412 {
413         struct tevent_req *req, *subreq;
414         struct get_complete_frag_state *state;
415         uint32_t pdu_len;
416         NTSTATUS status;
417
418         req = tevent_req_create(mem_ctx, &state,
419                                 struct get_complete_frag_state);
420         if (req == NULL) {
421                 return NULL;
422         }
423         state->ev = ev;
424         state->cli = cli;
425         state->prhdr = prhdr;
426         state->pdu = pdu;
427
428         pdu_len = prs_data_size(pdu);
429         if (pdu_len < RPC_HEADER_LEN) {
430                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
431                         status = NT_STATUS_NO_MEMORY;
432                         goto post_status;
433                 }
434                 subreq = rpc_read_send(
435                         state, state->ev,
436                         state->cli->transport,
437                         (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
438                         RPC_HEADER_LEN - pdu_len);
439                 if (subreq == NULL) {
440                         status = NT_STATUS_NO_MEMORY;
441                         goto post_status;
442                 }
443                 tevent_req_set_callback(subreq, get_complete_frag_got_header,
444                                         req);
445                 return req;
446         }
447
448         status = parse_rpc_header(cli, prhdr, pdu);
449         if (!NT_STATUS_IS_OK(status)) {
450                 goto post_status;
451         }
452
453         /*
454          * Ensure we have frag_len bytes of data.
455          */
456         if (pdu_len < prhdr->frag_len) {
457                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
458                         status = NT_STATUS_NO_MEMORY;
459                         goto post_status;
460                 }
461                 subreq = rpc_read_send(state, state->ev,
462                                        state->cli->transport,
463                                        (uint8_t *)(prs_data_p(pdu) + pdu_len),
464                                        prhdr->frag_len - pdu_len);
465                 if (subreq == NULL) {
466                         status = NT_STATUS_NO_MEMORY;
467                         goto post_status;
468                 }
469                 tevent_req_set_callback(subreq, get_complete_frag_got_rest,
470                                         req);
471                 return req;
472         }
473
474         status = NT_STATUS_OK;
475  post_status:
476         if (NT_STATUS_IS_OK(status)) {
477                 tevent_req_done(req);
478         } else {
479                 tevent_req_nterror(req, status);
480         }
481         return tevent_req_post(req, ev);
482 }
483
484 static void get_complete_frag_got_header(struct tevent_req *subreq)
485 {
486         struct tevent_req *req = tevent_req_callback_data(
487                 subreq, struct tevent_req);
488         struct get_complete_frag_state *state = tevent_req_data(
489                 req, struct get_complete_frag_state);
490         NTSTATUS status;
491
492         status = rpc_read_recv(subreq);
493         TALLOC_FREE(subreq);
494         if (!NT_STATUS_IS_OK(status)) {
495                 tevent_req_nterror(req, status);
496                 return;
497         }
498
499         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
500         if (!NT_STATUS_IS_OK(status)) {
501                 tevent_req_nterror(req, status);
502                 return;
503         }
504
505         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
506                 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
507                 return;
508         }
509
510         /*
511          * We're here in this piece of code because we've read exactly
512          * RPC_HEADER_LEN bytes into state->pdu.
513          */
514
515         subreq = rpc_read_send(
516                 state, state->ev, state->cli->transport,
517                 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
518                 state->prhdr->frag_len - RPC_HEADER_LEN);
519         if (tevent_req_nomem(subreq, req)) {
520                 return;
521         }
522         tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
523 }
524
525 static void get_complete_frag_got_rest(struct tevent_req *subreq)
526 {
527         struct tevent_req *req = tevent_req_callback_data(
528                 subreq, struct tevent_req);
529         NTSTATUS status;
530
531         status = rpc_read_recv(subreq);
532         TALLOC_FREE(subreq);
533         if (!NT_STATUS_IS_OK(status)) {
534                 tevent_req_nterror(req, status);
535                 return;
536         }
537         tevent_req_done(req);
538 }
539
540 static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
541 {
542         return tevent_req_simple_recv_ntstatus(req);
543 }
544
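The point of converting get_complete_frag to tevent_req is that callers can now compose it with standard tevent_req plumbing. A stripped-down sketch of that composition follows; the frag_example_* names are hypothetical, and the pattern is the same one rpc_api_pipe uses further down:

struct frag_example_state {
        struct rpc_hdr_info rhdr;
};

static void frag_example_done(struct tevent_req *subreq);

static struct tevent_req *frag_example_send(TALLOC_CTX *mem_ctx,
                                            struct event_context *ev,
                                            struct rpc_pipe_client *cli,
                                            prs_struct *frag)
{
        struct tevent_req *req, *subreq;
        struct frag_example_state *state;

        req = tevent_req_create(mem_ctx, &state, struct frag_example_state);
        if (req == NULL) {
                return NULL;
        }
        /* Pull a complete fragment into "frag", parsing its header. */
        subreq = get_complete_frag_send(state, ev, cli, &state->rhdr, frag);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, frag_example_done, req);
        return req;
}

static void frag_example_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        NTSTATUS status;

        status = get_complete_frag_recv(subreq);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }
        tevent_req_done(req);
}
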
545 /****************************************************************************
546  NTLMSSP specific sign/seal.
547  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
548  In fact I should probably abstract these into a single shared piece of code... JRA.
549  ****************************************************************************/
550
551 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
552                                 prs_struct *current_pdu,
553                                 uint8 *p_ss_padding_len)
554 {
555         RPC_HDR_AUTH auth_info;
556         uint32 save_offset = prs_offset(current_pdu);
557         uint32 auth_len = prhdr->auth_len;
558         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
559         unsigned char *data = NULL;
560         size_t data_len;
561         unsigned char *full_packet_data = NULL;
562         size_t full_packet_data_len;
563         DATA_BLOB auth_blob;
564         NTSTATUS status;
565
566         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
567             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
568                 return NT_STATUS_OK;
569         }
570
571         if (!ntlmssp_state) {
572                 return NT_STATUS_INVALID_PARAMETER;
573         }
574
575         /* Ensure there's enough data for an authenticated response. */
576         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
577                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
578                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
579                         (unsigned int)auth_len ));
580                 return NT_STATUS_BUFFER_TOO_SMALL;
581         }
582
583         /*
584          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
585          * after the RPC header.
586          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
587          * functions as NTLMv2 checks the rpc headers also.
588          */
589
590         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
591         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
592
593         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
594         full_packet_data_len = prhdr->frag_len - auth_len;
595
596         /* Pull the auth header and the following data into a blob. */
597         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
598                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
599                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
600                 return NT_STATUS_BUFFER_TOO_SMALL;
601         }
602
603         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
604                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
605                 return NT_STATUS_BUFFER_TOO_SMALL;
606         }
607
608         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
609         auth_blob.length = auth_len;
610
611         switch (cli->auth->auth_level) {
612                 case PIPE_AUTH_LEVEL_PRIVACY:
613                         /* Data is encrypted. */
614                         status = ntlmssp_unseal_packet(ntlmssp_state,
615                                                         data, data_len,
616                                                         full_packet_data,
617                                                         full_packet_data_len,
618                                                         &auth_blob);
619                         if (!NT_STATUS_IS_OK(status)) {
620                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
621                                         "packet from %s. Error was %s.\n",
622                                         rpccli_pipe_txt(debug_ctx(), cli),
623                                         nt_errstr(status) ));
624                                 return status;
625                         }
626                         break;
627                 case PIPE_AUTH_LEVEL_INTEGRITY:
628                         /* Data is signed. */
629                         status = ntlmssp_check_packet(ntlmssp_state,
630                                                         data, data_len,
631                                                         full_packet_data,
632                                                         full_packet_data_len,
633                                                         &auth_blob);
634                         if (!NT_STATUS_IS_OK(status)) {
635                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
636                                         "packet from %s. Error was %s.\n",
637                                         rpccli_pipe_txt(debug_ctx(), cli),
638                                         nt_errstr(status) ));
639                                 return status;
640                         }
641                         break;
642                 default:
643                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
644                                   "auth level %d\n", cli->auth->auth_level));
645                         return NT_STATUS_INVALID_INFO_CLASS;
646         }
647
648         /*
649          * Return the current pointer to the data offset.
650          */
651
652         if(!prs_set_offset(current_pdu, save_offset)) {
653                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
654                         (unsigned int)save_offset ));
655                 return NT_STATUS_BUFFER_TOO_SMALL;
656         }
657
658         /*
659          * Remember the padding length. We must remove it from the real data
660          * stream once the sign/seal is done.
661          */
662
663         *p_ss_padding_len = auth_info.auth_pad_len;
664
665         return NT_STATUS_OK;
666 }
667
668 /****************************************************************************
669  schannel specific sign/seal.
670  ****************************************************************************/
671
672 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
673                                 prs_struct *current_pdu,
674                                 uint8 *p_ss_padding_len)
675 {
676         RPC_HDR_AUTH auth_info;
677         RPC_AUTH_SCHANNEL_CHK schannel_chk;
678         uint32 auth_len = prhdr->auth_len;
679         uint32 save_offset = prs_offset(current_pdu);
680         struct schannel_auth_struct *schannel_auth =
681                 cli->auth->a_u.schannel_auth;
682         uint32 data_len;
683
684         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
685             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
686                 return NT_STATUS_OK;
687         }
688
689         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
690                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
691                 return NT_STATUS_INVALID_PARAMETER;
692         }
693
694         if (!schannel_auth) {
695                 return NT_STATUS_INVALID_PARAMETER;
696         }
697
698         /* Ensure there's enough data for an authenticated response. */
699         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
700                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
701                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
702                         (unsigned int)auth_len ));
703                 return NT_STATUS_INVALID_PARAMETER;
704         }
705
706         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
707
708         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
709                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
710                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
711                 return NT_STATUS_BUFFER_TOO_SMALL;
712         }
713
714         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
715                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
716                 return NT_STATUS_BUFFER_TOO_SMALL;
717         }
718
719         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
720                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
721                         auth_info.auth_type));
722                 return NT_STATUS_BUFFER_TOO_SMALL;
723         }
724
725         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
726                                 &schannel_chk, current_pdu, 0)) {
727                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
728                 return NT_STATUS_BUFFER_TOO_SMALL;
729         }
730
731         if (!schannel_decode(schannel_auth,
732                         cli->auth->auth_level,
733                         SENDER_IS_ACCEPTOR,
734                         &schannel_chk,
735                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
736                         data_len)) {
737                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
738                                 "Connection to %s.\n",
739                                 rpccli_pipe_txt(debug_ctx(), cli)));
740                 return NT_STATUS_INVALID_PARAMETER;
741         }
742
743         /* The sequence number gets incremented on both send and receive. */
744         schannel_auth->seq_num++;
745
746         /*
747          * Return the current pointer to the data offset.
748          */
749
750         if(!prs_set_offset(current_pdu, save_offset)) {
751                 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
752                         (unsigned int)save_offset ));
753                 return NT_STATUS_BUFFER_TOO_SMALL;
754         }
755
756         /*
757          * Remember the padding length. We must remove it from the real data
758          * stream once the sign/seal is done.
759          */
760
761         *p_ss_padding_len = auth_info.auth_pad_len;
762
763         return NT_STATUS_OK;
764 }
765
766 /****************************************************************************
767  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
768  ****************************************************************************/
769
770 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
771                                 prs_struct *current_pdu,
772                                 uint8 *p_ss_padding_len)
773 {
774         NTSTATUS ret = NT_STATUS_OK;
775
776         /* Paranoia checks for auth_len. */
777         if (prhdr->auth_len) {
778                 if (prhdr->auth_len > prhdr->frag_len) {
779                         return NT_STATUS_INVALID_PARAMETER;
780                 }
781
782                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
783                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
784                         /* Integer wrap attempt. */
785                         return NT_STATUS_INVALID_PARAMETER;
786                 }
787         }
788
789         /*
790          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
791          */
792
793         switch(cli->auth->auth_type) {
794                 case PIPE_AUTH_TYPE_NONE:
795                         if (prhdr->auth_len) {
796                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
797                                           "Connection to %s - got non-zero "
798                                           "auth len %u.\n",
799                                         rpccli_pipe_txt(debug_ctx(), cli),
800                                         (unsigned int)prhdr->auth_len ));
801                                 return NT_STATUS_INVALID_PARAMETER;
802                         }
803                         break;
804
805                 case PIPE_AUTH_TYPE_NTLMSSP:
806                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
807                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
808                         if (!NT_STATUS_IS_OK(ret)) {
809                                 return ret;
810                         }
811                         break;
812
813                 case PIPE_AUTH_TYPE_SCHANNEL:
814                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
815                         if (!NT_STATUS_IS_OK(ret)) {
816                                 return ret;
817                         }
818                         break;
819
820                 case PIPE_AUTH_TYPE_KRB5:
821                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
822                 default:
823                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
824                                   "to %s - unknown internal auth type %u.\n",
825                                   rpccli_pipe_txt(debug_ctx(), cli),
826                                   cli->auth->auth_type ));
827                         return NT_STATUS_INVALID_INFO_CLASS;
828         }
829
830         return NT_STATUS_OK;
831 }
832
833 /****************************************************************************
834  Do basic authentication checks on an incoming pdu.
835  ****************************************************************************/
836
837 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
838                         prs_struct *current_pdu,
839                         uint8 expected_pkt_type,
840                         char **ppdata,
841                         uint32 *pdata_len,
842                         prs_struct *return_data)
843 {
844
845         NTSTATUS ret = NT_STATUS_OK;
846         uint32 current_pdu_len = prs_data_size(current_pdu);
847
848         if (current_pdu_len != prhdr->frag_len) {
849                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
850                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
851                 return NT_STATUS_INVALID_PARAMETER;
852         }
853
854         /*
855          * Point the return values at the real data including the RPC
856          * header. Just in case the caller wants it.
857          */
858         *ppdata = prs_data_p(current_pdu);
859         *pdata_len = current_pdu_len;
860
861         /* Ensure we have the correct type. */
862         switch (prhdr->pkt_type) {
863                 case RPC_ALTCONTRESP:
864                 case RPC_BINDACK:
865
866                         /* Alter context and bind ack share the same packet definitions. */
867                         break;
868
869
870                 case RPC_RESPONSE:
871                 {
872                         RPC_HDR_RESP rhdr_resp;
873                         uint8 ss_padding_len = 0;
874
875                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
876                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
877                                 return NT_STATUS_BUFFER_TOO_SMALL;
878                         }
879
880                         /* Here's where we deal with incoming sign/seal. */
881                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
882                                         current_pdu, &ss_padding_len);
883                         if (!NT_STATUS_IS_OK(ret)) {
884                                 return ret;
885                         }
886
887                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
888                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
889
890                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
891                                 return NT_STATUS_BUFFER_TOO_SMALL;
892                         }
893
894                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
895
896                         /* Remember to remove the auth footer. */
897                         if (prhdr->auth_len) {
898                                 /* We've already done integer wrap tests on auth_len in
899                                         cli_pipe_validate_rpc_response(). */
900                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
901                                         return NT_STATUS_BUFFER_TOO_SMALL;
902                                 }
903                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
904                         }
905
906                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
907                                 current_pdu_len, *pdata_len, ss_padding_len ));
908
909                         /*
910                          * If this is the first reply, and the allocation hint is reasonable, try to
911                          * set up the return_data parse_struct to the correct size.
912                          */
913
914                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
915                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
916                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
917                                                 "too large to allocate\n",
918                                                 (unsigned int)rhdr_resp.alloc_hint ));
919                                         return NT_STATUS_NO_MEMORY;
920                                 }
921                         }
922
923                         break;
924                 }
925
926                 case RPC_BINDNACK:
927                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
928                                   "received from %s!\n",
929                                   rpccli_pipe_txt(debug_ctx(), cli)));
930                         /* Use this for now... */
931                         return NT_STATUS_NETWORK_ACCESS_DENIED;
932
933                 case RPC_FAULT:
934                 {
935                         RPC_HDR_RESP rhdr_resp;
936                         RPC_HDR_FAULT fault_resp;
937
938                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
939                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
940                                 return NT_STATUS_BUFFER_TOO_SMALL;
941                         }
942
943                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
944                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
945                                 return NT_STATUS_BUFFER_TOO_SMALL;
946                         }
947
948                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
949                                   "code %s received from %s!\n",
950                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
951                                 rpccli_pipe_txt(debug_ctx(), cli)));
952                         if (NT_STATUS_IS_OK(fault_resp.status)) {
953                                 return NT_STATUS_UNSUCCESSFUL;
954                         } else {
955                                 return fault_resp.status;
956                         }
957                 }
958
959                 default:
960                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
961                                 "from %s!\n",
962                                 (unsigned int)prhdr->pkt_type,
963                                 rpccli_pipe_txt(debug_ctx(), cli)));
964                         return NT_STATUS_INVALID_INFO_CLASS;
965         }
966
967         if (prhdr->pkt_type != expected_pkt_type) {
968                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
969                           "got an unexpected RPC packet type - %u, not %u\n",
970                         rpccli_pipe_txt(debug_ctx(), cli),
971                         prhdr->pkt_type,
972                         expected_pkt_type));
973                 return NT_STATUS_INVALID_INFO_CLASS;
974         }
975
976         /* Do this just before return - we don't want to modify any rpc header
977            data before now as we may have needed to do cryptographic actions on
978            it before. */
979
980         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
981                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
982                         "setting fragment first/last ON.\n"));
983                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
984         }
985
986         return NT_STATUS_OK;
987 }
988
989 /****************************************************************************
990  Ensure we eat the just processed pdu from the current_pdu prs_struct.
991  Normally the frag_len and buffer size will match, but on the first trans
992  reply there is a theoretical chance that buffer size > frag_len, so we must
993  deal with that.
994  ****************************************************************************/
995
996 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
997 {
998         uint32 current_pdu_len = prs_data_size(current_pdu);
999
1000         if (current_pdu_len < prhdr->frag_len) {
1001                 return NT_STATUS_BUFFER_TOO_SMALL;
1002         }
1003
1004         /* Common case. */
1005         if (current_pdu_len == (uint32)prhdr->frag_len) {
1006                 prs_mem_free(current_pdu);
1007                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1008                 /* Make current_pdu dynamic with no memory. */
1009                 prs_give_memory(current_pdu, 0, 0, True);
1010                 return NT_STATUS_OK;
1011         }
1012
1013         /*
1014          * Oh no ! More data in buffer than we processed in current pdu.
1015          * Cheat. Move the data down and shrink the buffer.
1016          */
1017
1018         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1019                         current_pdu_len - prhdr->frag_len);
1020
1021         /* Remember to set the read offset back to zero. */
1022         prs_set_offset(current_pdu, 0);
1023
1024         /* Shrink the buffer. */
1025         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1026                 return NT_STATUS_BUFFER_TOO_SMALL;
1027         }
1028
1029         return NT_STATUS_OK;
1030 }
1031
1032 /****************************************************************************
1033  Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1034 ****************************************************************************/
1035
1036 struct cli_api_pipe_state {
1037         struct event_context *ev;
1038         struct rpc_cli_transport *transport;
1039         uint8_t *rdata;
1040         uint32_t rdata_len;
1041 };
1042
1043 static void cli_api_pipe_trans_done(struct async_req *subreq);
1044 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1045 static void cli_api_pipe_read_done(struct async_req *subreq);
1046
1047 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1048                                            struct event_context *ev,
1049                                            struct rpc_cli_transport *transport,
1050                                            uint8_t *data, size_t data_len,
1051                                            uint32_t max_rdata_len)
1052 {
1053         struct async_req *result, *subreq;
1054         struct tevent_req *subreq2;
1055         struct cli_api_pipe_state *state;
1056         NTSTATUS status;
1057
1058         if (!async_req_setup(mem_ctx, &result, &state,
1059                              struct cli_api_pipe_state)) {
1060                 return NULL;
1061         }
1062         state->ev = ev;
1063         state->transport = transport;
1064
1065         if (max_rdata_len < RPC_HEADER_LEN) {
1066                 /*
1067                  * For an RPC reply we always need at least RPC_HEADER_LEN
1068                  * bytes. We check this here because we will receive
1069                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1070                  */
1071                 status = NT_STATUS_INVALID_PARAMETER;
1072                 goto post_status;
1073         }
1074
1075         if (transport->trans_send != NULL) {
1076                 subreq = transport->trans_send(state, ev, data, data_len,
1077                                                max_rdata_len, transport->priv);
1078                 if (subreq == NULL) {
1079                         status = NT_STATUS_NO_MEMORY;
1080                         goto post_status;
1081                 }
1082                 subreq->async.fn = cli_api_pipe_trans_done;
1083                 subreq->async.priv = result;
1084                 return result;
1085         }
1086
1087         /*
1088          * If the transport does not provide a "trans" routine, e.g. the
1089          * ncacn_ip_tcp transport, do the write/read step here.
1090          */
1091
1092         subreq2 = rpc_write_send(state, ev, transport, data, data_len);
1093         if (subreq2 == NULL) {
1094                 goto fail;
1095         }
1096         tevent_req_set_callback(subreq2, cli_api_pipe_write_done, result);
1097         return result;
1098
1099         status = NT_STATUS_INVALID_PARAMETER;
1100
1101  post_status:
1102         if (async_post_ntstatus(result, ev, status)) {
1103                 return result;
1104         }
1105  fail:
1106         TALLOC_FREE(result);
1107         return NULL;
1108 }
1109
1110 static void cli_api_pipe_trans_done(struct async_req *subreq)
1111 {
1112         struct async_req *req = talloc_get_type_abort(
1113                 subreq->async.priv, struct async_req);
1114         struct cli_api_pipe_state *state = talloc_get_type_abort(
1115                 req->private_data, struct cli_api_pipe_state);
1116         NTSTATUS status;
1117
1118         status = state->transport->trans_recv(subreq, state, &state->rdata,
1119                                               &state->rdata_len);
1120         TALLOC_FREE(subreq);
1121         if (!NT_STATUS_IS_OK(status)) {
1122                 async_req_nterror(req, status);
1123                 return;
1124         }
1125         async_req_done(req);
1126 }
1127
1128 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1129 {
1130         struct async_req *req = tevent_req_callback_data(
1131                 subreq, struct async_req);
1132         struct cli_api_pipe_state *state = talloc_get_type_abort(
1133                 req->private_data, struct cli_api_pipe_state);
1134         struct async_req *subreq2;
1135         NTSTATUS status;
1136
1137         status = rpc_write_recv(subreq);
1138         TALLOC_FREE(subreq);
1139         if (!NT_STATUS_IS_OK(status)) {
1140                 async_req_nterror(req, status);
1141                 return;
1142         }
1143
1144         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1145         if (async_req_nomem(state->rdata, req)) {
1146                 return;
1147         }
1148
1149         /*
1150          * We don't need to use rpc_read_send here; the upper layer will cope
1151          * with a short read, as transport->trans_send could also return less
1152          * than state->max_rdata_len.
1153          */
1154         subreq2 = state->transport->read_send(state, state->ev, state->rdata,
1155                                               RPC_HEADER_LEN,
1156                                               state->transport->priv);
1157         if (async_req_nomem(subreq2, req)) {
1158                 return;
1159         }
1160         subreq2->async.fn = cli_api_pipe_read_done;
1161         subreq2->async.priv = req;
1162 }
1163
1164 static void cli_api_pipe_read_done(struct async_req *subreq)
1165 {
1166         struct async_req *req = talloc_get_type_abort(
1167                 subreq->async.priv, struct async_req);
1168         struct cli_api_pipe_state *state = talloc_get_type_abort(
1169                 req->private_data, struct cli_api_pipe_state);
1170         NTSTATUS status;
1171         ssize_t received;
1172
1173         status = state->transport->read_recv(subreq, &received);
1174         TALLOC_FREE(subreq);
1175         if (!NT_STATUS_IS_OK(status)) {
1176                 async_req_nterror(req, status);
1177                 return;
1178         }
1179         state->rdata_len = received;
1180         async_req_done(req);
1181 }
1182
1183 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1184                                   uint8_t **prdata, uint32_t *prdata_len)
1185 {
1186         struct cli_api_pipe_state *state = talloc_get_type_abort(
1187                 req->private_data, struct cli_api_pipe_state);
1188         NTSTATUS status;
1189
1190         if (async_req_is_nterror(req, &status)) {
1191                 return status;
1192         }
1193
1194         *prdata = talloc_move(mem_ctx, &state->rdata);
1195         *prdata_len = state->rdata_len;
1196         return NT_STATUS_OK;
1197 }
1198
1199 /****************************************************************************
1200  Send data on an rpc pipe via trans. The prs_struct data must be the last
1201  pdu fragment of an NDR data stream.
1202
1203  Receive response data from an rpc pipe, which may be large...
1204
1205  Read the first fragment: unfortunately have to use SMBtrans for the first
1206  bit, then SMBreadX for subsequent bits.
1207
1208  If first fragment received also wasn't the last fragment, continue
1209  getting fragments until we _do_ receive the last fragment.
1210
1211  Request/Response PDUs look like the following...
1212
1213  |<------------------PDU len----------------------------------------------->|
1214  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1215
1216  +------------+-----------------+-------------+---------------+-------------+
1217  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1218  +------------+-----------------+-------------+---------------+-------------+
1219
1220  Where the presence of the AUTH_HDR and AUTH DATA is dependent on the
1221  signing & sealing being negotiated.
1222
1223  ****************************************************************************/
1224
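To make the length arithmetic in the diagram above concrete, here is a small illustrative helper, hypothetical and not part of this file, mirroring what cli_pipe_verify_ntlmssp() and cli_pipe_verify_schannel() compute; any sign/seal padding is handled separately via auth_pad_len:

static size_t example_ndr_payload_len(uint16 frag_len, uint16 auth_len)
{
        if (auth_len == 0) {
                /* No auth trailer present at all. */
                return frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
        }
        /* Strip the fixed headers plus the auth trailer off the fragment. */
        return frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN
                - RPC_HDR_AUTH_LEN - auth_len;
}
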
1225 struct rpc_api_pipe_state {
1226         struct event_context *ev;
1227         struct rpc_pipe_client *cli;
1228         uint8_t expected_pkt_type;
1229
1230         prs_struct incoming_frag;
1231         struct rpc_hdr_info rhdr;
1232
1233         prs_struct incoming_pdu;        /* Incoming reply */
1234         uint32_t incoming_pdu_offset;
1235 };
1236
1237 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1238 {
1239         prs_mem_free(&state->incoming_frag);
1240         prs_mem_free(&state->incoming_pdu);
1241         return 0;
1242 }
1243
1244 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1245 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
1246
1247 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1248                                            struct event_context *ev,
1249                                            struct rpc_pipe_client *cli,
1250                                            prs_struct *data, /* Outgoing PDU */
1251                                            uint8_t expected_pkt_type)
1252 {
1253         struct async_req *result, *subreq;
1254         struct rpc_api_pipe_state *state;
1255         uint16_t max_recv_frag;
1256         NTSTATUS status;
1257
1258         if (!async_req_setup(mem_ctx, &result, &state,
1259                              struct rpc_api_pipe_state)) {
1260                 return NULL;
1261         }
1262         state->ev = ev;
1263         state->cli = cli;
1264         state->expected_pkt_type = expected_pkt_type;
1265         state->incoming_pdu_offset = 0;
1266
1267         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1268
1269         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1270         /* Make incoming_pdu dynamic with no memory. */
1271         prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1272
1273         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1274
1275         /*
1276          * Ensure we're not sending too much.
1277          */
1278         if (prs_offset(data) > cli->max_xmit_frag) {
1279                 status = NT_STATUS_INVALID_PARAMETER;
1280                 goto post_status;
1281         }
1282
1283         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1284
1285         max_recv_frag = cli->max_recv_frag;
1286
1287 #ifdef DEVELOPER
1288         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1289 #endif
1290
1291         subreq = cli_api_pipe_send(state, ev, cli->transport,
1292                                    (uint8_t *)prs_data_p(data),
1293                                    prs_offset(data), max_recv_frag);
1294         if (subreq == NULL) {
1295                 status = NT_STATUS_NO_MEMORY;
1296                 goto post_status;
1297         }
1298         subreq->async.fn = rpc_api_pipe_trans_done;
1299         subreq->async.priv = result;
1300         return result;
1301
1302  post_status:
1303         if (async_post_ntstatus(result, ev, status)) {
1304                 return result;
1305         }
1306         TALLOC_FREE(result);
1307         return NULL;
1308 }
1309
1310 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1311 {
1312         struct async_req *req = talloc_get_type_abort(
1313                 subreq->async.priv, struct async_req);
1314         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1315                 req->private_data, struct rpc_api_pipe_state);
1316         struct tevent_req *subreq2;
1317         NTSTATUS status;
1318         uint8_t *rdata = NULL;
1319         uint32_t rdata_len = 0;
1320         char *rdata_copy;
1321
1322         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1323         TALLOC_FREE(subreq);
1324         if (!NT_STATUS_IS_OK(status)) {
1325                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1326                 async_req_nterror(req, status);
1327                 return;
1328         }
1329
1330         if (rdata == NULL) {
1331                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1332                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1333                 async_req_done(req);
1334                 return;
1335         }
1336
1337         /*
1338          * Give the memory received from cli_trans as dynamic to the current
1339          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1340          * :-(
1341          */
1342         rdata_copy = (char *)memdup(rdata, rdata_len);
1343         TALLOC_FREE(rdata);
1344         if (async_req_nomem(rdata_copy, req)) {
1345                 return;
1346         }
1347         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1348
1349         /* Ensure we have a complete fragment. */
1350         subreq2 = get_complete_frag_send(state, state->ev, state->cli,
1351                                          &state->rhdr, &state->incoming_frag);
1352         if (async_req_nomem(subreq2, req)) {
1353                 return;
1354         }
1355         tevent_req_set_callback(subreq2, rpc_api_pipe_got_pdu, req);
1356 }
1357
1358 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
1359 {
1360         struct async_req *req = tevent_req_callback_data(
1361                 subreq, struct async_req);
1362         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1363                 req->private_data, struct rpc_api_pipe_state);
1364         NTSTATUS status;
1365         char *rdata = NULL;
1366         uint32_t rdata_len = 0;
1367
1368         status = get_complete_frag_recv(subreq);
1369         TALLOC_FREE(subreq);
1370         if (!NT_STATUS_IS_OK(status)) {
1371                 DEBUG(5, ("get_complete_frag failed: %s\n",
1372                           nt_errstr(status)));
1373                 async_req_nterror(req, status);
1374                 return;
1375         }
1376
1377         status = cli_pipe_validate_current_pdu(
1378                 state->cli, &state->rhdr, &state->incoming_frag,
1379                 state->expected_pkt_type, &rdata, &rdata_len,
1380                 &state->incoming_pdu);
1381
1382         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1383                   (unsigned)prs_data_size(&state->incoming_frag),
1384                   (unsigned)state->incoming_pdu_offset,
1385                   nt_errstr(status)));
1386
1387         if (!NT_STATUS_IS_OK(status)) {
1388                 async_req_nterror(req, status);
1389                 return;
1390         }
1391
1392         if ((state->rhdr.flags & RPC_FLG_FIRST)
1393             && (state->rhdr.pack_type[0] == 0)) {
1394                 /*
1395                  * Set the data type correctly for big-endian data on the
1396                  * first packet.
1397                  */
1398                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1399                           "big-endian.\n",
1400                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1401                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1402         }
1403         /*
1404          * Check endianness on subsequent packets.
1405          */
1406         if (state->incoming_frag.bigendian_data
1407             != state->incoming_pdu.bigendian_data) {
1408                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1409                          "%s\n",
1410                          state->incoming_pdu.bigendian_data?"big":"little",
1411                          state->incoming_frag.bigendian_data?"big":"little"));
1412                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1413                 return;
1414         }
1415
1416         /* Now copy the data portion of the fragment into the incoming pdu. */
1417         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1418                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1419                 return;
1420         }
1421
1422         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1423                rdata, (size_t)rdata_len);
1424         state->incoming_pdu_offset += rdata_len;
1425
1426         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1427                                             &state->incoming_frag);
1428         if (!NT_STATUS_IS_OK(status)) {
1429                 async_req_nterror(req, status);
1430                 return;
1431         }
1432
1433         if (state->rhdr.flags & RPC_FLG_LAST) {
1434                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1435                           rpccli_pipe_txt(debug_ctx(), state->cli),
1436                           (unsigned)prs_data_size(&state->incoming_pdu)));
1437                 async_req_done(req);
1438                 return;
1439         }
1440
1441         subreq = get_complete_frag_send(state, state->ev, state->cli,
1442                                         &state->rhdr, &state->incoming_frag);
1443         if (async_req_nomem(subreq, req)) {
1444                 return;
1445         }
1446         tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
1447 }
1448
1449 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1450                                   prs_struct *reply_pdu)
1451 {
1452         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1453                 req->private_data, struct rpc_api_pipe_state);
1454         NTSTATUS status;
1455
1456         if (async_req_is_nterror(req, &status)) {
1457                 return status;
1458         }
1459
1460         *reply_pdu = state->incoming_pdu;
1461         reply_pdu->mem_ctx = mem_ctx;
1462
1463         /*
1464          * Prevent state->incoming_pdu from being freed in
1465          * rpc_api_pipe_state_destructor()
1466          */
1467         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1468
1469         return NT_STATUS_OK;
1470 }
1471
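/*
 * Typical caller pattern for the rpc_api_pipe_send/recv pair (a sketch
 * only, mirroring what rpc_api_pipe_req_send() further down does):
 *
 *	subreq = rpc_api_pipe_send(state, ev, cli, &data, RPC_RESPONSE);
 *	if (subreq == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	subreq->async.fn = my_done_fn;
 *	subreq->async.priv = req;
 *
 * and in my_done_fn():
 *
 *	status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
 *	TALLOC_FREE(subreq);
 *
 * "data", "my_done_fn", "state" and "req" are placeholders for the
 * caller's own request data, completion function and state.
 */
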
1472 /*******************************************************************
1473  Creates krb5 auth bind.
1474  ********************************************************************/
1475
1476 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1477                                                 enum pipe_auth_level auth_level,
1478                                                 RPC_HDR_AUTH *pauth_out,
1479                                                 prs_struct *auth_data)
1480 {
1481 #ifdef HAVE_KRB5
1482         int ret;
1483         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1484         DATA_BLOB tkt = data_blob_null;
1485         DATA_BLOB tkt_wrapped = data_blob_null;
1486
1487         /* We may change the pad length before marshalling. */
1488         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1489
1490         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1491                 a->service_principal ));
1492
1493         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1494
1495         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1496                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1497
1498         if (ret) {
1499                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1500                         "failed with %s\n",
1501                         a->service_principal,
1502                         error_message(ret) ));
1503
1504                 data_blob_free(&tkt);
1505                 prs_mem_free(auth_data);
1506                 return NT_STATUS_INVALID_PARAMETER;
1507         }
1508
1509         /* wrap that up in a nice GSS-API wrapping */
1510         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1511
1512         data_blob_free(&tkt);
1513
1514         /* Auth len in the rpc header doesn't include auth_header. */
1515         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1516                 data_blob_free(&tkt_wrapped);
1517                 prs_mem_free(auth_data);
1518                 return NT_STATUS_NO_MEMORY;
1519         }
1520
1521         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1522         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1523
1524         data_blob_free(&tkt_wrapped);
1525         return NT_STATUS_OK;
1526 #else
1527         return NT_STATUS_INVALID_PARAMETER;
1528 #endif
1529 }
1530
1531 /*******************************************************************
1532  Creates SPNEGO NTLMSSP auth bind.
1533  ********************************************************************/
1534
1535 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1536                                                 enum pipe_auth_level auth_level,
1537                                                 RPC_HDR_AUTH *pauth_out,
1538                                                 prs_struct *auth_data)
1539 {
1540         NTSTATUS nt_status;
1541         DATA_BLOB null_blob = data_blob_null;
1542         DATA_BLOB request = data_blob_null;
1543         DATA_BLOB spnego_msg = data_blob_null;
1544
1545         /* We may change the pad length before marshalling. */
1546         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1547
1548         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1549         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1550                                         null_blob,
1551                                         &request);
1552
1553         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1554                 data_blob_free(&request);
1555                 prs_mem_free(auth_data);
1556                 return nt_status;
1557         }
1558
1559         /* Wrap this in SPNEGO. */
1560         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1561
1562         data_blob_free(&request);
1563
1564         /* Auth len in the rpc header doesn't include auth_header. */
1565         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1566                 data_blob_free(&spnego_msg);
1567                 prs_mem_free(auth_data);
1568                 return NT_STATUS_NO_MEMORY;
1569         }
1570
1571         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1572         dump_data(5, spnego_msg.data, spnego_msg.length);
1573
1574         data_blob_free(&spnego_msg);
1575         return NT_STATUS_OK;
1576 }
1577
1578 /*******************************************************************
1579  Creates NTLMSSP auth bind.
1580  ********************************************************************/
1581
1582 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1583                                                 enum pipe_auth_level auth_level,
1584                                                 RPC_HDR_AUTH *pauth_out,
1585                                                 prs_struct *auth_data)
1586 {
1587         NTSTATUS nt_status;
1588         DATA_BLOB null_blob = data_blob_null;
1589         DATA_BLOB request = data_blob_null;
1590
1591         /* We may change the pad length before marshalling. */
1592         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1593
1594         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1595         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1596                                         null_blob,
1597                                         &request);
1598
1599         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1600                 data_blob_free(&request);
1601                 prs_mem_free(auth_data);
1602                 return nt_status;
1603         }
1604
1605         /* Auth len in the rpc header doesn't include auth_header. */
1606         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1607                 data_blob_free(&request);
1608                 prs_mem_free(auth_data);
1609                 return NT_STATUS_NO_MEMORY;
1610         }
1611
1612         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1613         dump_data(5, request.data, request.length);
1614
1615         data_blob_free(&request);
1616         return NT_STATUS_OK;
1617 }
1618
1619 /*******************************************************************
1620  Creates schannel auth bind.
1621  ********************************************************************/
1622
1623 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1624                                                 enum pipe_auth_level auth_level,
1625                                                 RPC_HDR_AUTH *pauth_out,
1626                                                 prs_struct *auth_data)
1627 {
1628         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1629
1630         /* We may change the pad length before marshalling. */
1631         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1632
1633         /* Use lp_workgroup() if domain not specified */
1634
1635         if (!cli->auth->domain || !cli->auth->domain[0]) {
1636                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1637                 if (cli->auth->domain == NULL) {
1638                         return NT_STATUS_NO_MEMORY;
1639                 }
1640         }
1641
1642         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1643                                    global_myname());
1644
1645         /*
1646          * Now marshall the data into the auth parse_struct.
1647          */
1648
1649         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1650                                        &schannel_neg, auth_data, 0)) {
1651                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1652                 prs_mem_free(auth_data);
1653                 return NT_STATUS_NO_MEMORY;
1654         }
1655
1656         return NT_STATUS_OK;
1657 }
1658
1659 /*******************************************************************
1660  Creates the internals of a DCE/RPC bind request or alter context PDU.
1661  ********************************************************************/
1662
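/*
 * Rough wire layout produced here (sketch):
 *
 *	RPC_HDR | RPC_HDR_RB (with one RPC_CONTEXT) |
 *	[ pad to an 8 byte boundary | RPC_HDR_AUTH | auth data ]
 *
 * The bracketed auth trailer is only emitted when auth_len != 0;
 * frag_len and auth_len in the RPC_HDR are computed to match.
 */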
1663 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1664                                                 prs_struct *rpc_out, 
1665                                                 uint32 rpc_call_id,
1666                                                 const RPC_IFACE *abstract,
1667                                                 const RPC_IFACE *transfer,
1668                                                 RPC_HDR_AUTH *phdr_auth,
1669                                                 prs_struct *pauth_info)
1670 {
1671         RPC_HDR hdr;
1672         RPC_HDR_RB hdr_rb;
1673         RPC_CONTEXT rpc_ctx;
1674         uint16 auth_len = prs_offset(pauth_info);
1675         uint8 ss_padding_len = 0;
1676         uint16 frag_len = 0;
1677
1678         /* create the RPC context. */
1679         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1680
1681         /* create the bind request RPC_HDR_RB */
1682         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1683
1684         /* Start building the frag length. */
1685         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1686
1687         /* Do we need to pad ? */
1688         if (auth_len) {
1689                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1690                 if (data_len % 8) {
1691                         ss_padding_len = 8 - (data_len % 8);
1692                         phdr_auth->auth_pad_len = ss_padding_len;
1693                 }
1694                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1695         }
1696
1697         /* Create the request RPC_HDR */
1698         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1699
1700         /* Marshall the RPC header */
1701         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1702                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1703                 return NT_STATUS_NO_MEMORY;
1704         }
1705
1706         /* Marshall the bind request data */
1707         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1708                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1709                 return NT_STATUS_NO_MEMORY;
1710         }
1711
1712         /*
1713          * Grow the outgoing buffer to store any auth info.
1714          */
1715
1716         if(auth_len != 0) {
1717                 if (ss_padding_len) {
1718                         char pad[8];
1719                         memset(pad, '\0', 8);
1720                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1721                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1722                                 return NT_STATUS_NO_MEMORY;
1723                         }
1724                 }
1725
1726                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1727                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1728                         return NT_STATUS_NO_MEMORY;
1729                 }
1730
1731
1732                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1733                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1734                         return NT_STATUS_NO_MEMORY;
1735                 }
1736         }
1737
1738         return NT_STATUS_OK;
1739 }
1740
1741 /*******************************************************************
1742  Creates a DCE/RPC bind request.
1743  ********************************************************************/
1744
1745 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1746                                 prs_struct *rpc_out, 
1747                                 uint32 rpc_call_id,
1748                                 const RPC_IFACE *abstract,
1749                                 const RPC_IFACE *transfer,
1750                                 enum pipe_auth_type auth_type,
1751                                 enum pipe_auth_level auth_level)
1752 {
1753         RPC_HDR_AUTH hdr_auth;
1754         prs_struct auth_info;
1755         NTSTATUS ret = NT_STATUS_OK;
1756
1757         ZERO_STRUCT(hdr_auth);
1758         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1759                 return NT_STATUS_NO_MEMORY;
1760
1761         switch (auth_type) {
1762                 case PIPE_AUTH_TYPE_SCHANNEL:
1763                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1764                         if (!NT_STATUS_IS_OK(ret)) {
1765                                 prs_mem_free(&auth_info);
1766                                 return ret;
1767                         }
1768                         break;
1769
1770                 case PIPE_AUTH_TYPE_NTLMSSP:
1771                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1772                         if (!NT_STATUS_IS_OK(ret)) {
1773                                 prs_mem_free(&auth_info);
1774                                 return ret;
1775                         }
1776                         break;
1777
1778                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1779                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1780                         if (!NT_STATUS_IS_OK(ret)) {
1781                                 prs_mem_free(&auth_info);
1782                                 return ret;
1783                         }
1784                         break;
1785
1786                 case PIPE_AUTH_TYPE_KRB5:
1787                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1788                         if (!NT_STATUS_IS_OK(ret)) {
1789                                 prs_mem_free(&auth_info);
1790                                 return ret;
1791                         }
1792                         break;
1793
1794                 case PIPE_AUTH_TYPE_NONE:
1795                         break;
1796
1797                 default:
1798                         /* "Can't" happen. */
1799                         return NT_STATUS_INVALID_INFO_CLASS;
1800         }
1801
1802         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1803                                                 rpc_out, 
1804                                                 rpc_call_id,
1805                                                 abstract,
1806                                                 transfer,
1807                                                 &hdr_auth,
1808                                                 &auth_info);
1809
1810         prs_mem_free(&auth_info);
1811         return ret;
1812 }
1813
1814 /*******************************************************************
1815  Create and add the NTLMSSP sign/seal auth header and data.
1816  ********************************************************************/
1817
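/*
 * The footer appended here is an RPC_HDR_AUTH followed by the
 * NTLMSSP_SIG_SIZE byte NTLMSSP blob that signs or seals the data
 * portion (plus any sign/seal padding) of the outgoing PDU.
 */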
1818 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1819                                         RPC_HDR *phdr,
1820                                         uint32 ss_padding_len,
1821                                         prs_struct *outgoing_pdu)
1822 {
1823         RPC_HDR_AUTH auth_info;
1824         NTSTATUS status;
1825         DATA_BLOB auth_blob = data_blob_null;
1826         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1827
1828         if (!cli->auth->a_u.ntlmssp_state) {
1829                 return NT_STATUS_INVALID_PARAMETER;
1830         }
1831
1832         /* Init and marshall the auth header. */
1833         init_rpc_hdr_auth(&auth_info,
1834                         map_pipe_auth_type_to_rpc_auth_type(
1835                                 cli->auth->auth_type),
1836                         cli->auth->auth_level,
1837                         ss_padding_len,
1838                         1 /* context id. */);
1839
1840         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1841                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1842                 data_blob_free(&auth_blob);
1843                 return NT_STATUS_NO_MEMORY;
1844         }
1845
1846         switch (cli->auth->auth_level) {
1847                 case PIPE_AUTH_LEVEL_PRIVACY:
1848                         /* Data portion is encrypted. */
1849                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1850                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1851                                         data_and_pad_len,
1852                                         (unsigned char *)prs_data_p(outgoing_pdu),
1853                                         (size_t)prs_offset(outgoing_pdu),
1854                                         &auth_blob);
1855                         if (!NT_STATUS_IS_OK(status)) {
1856                                 data_blob_free(&auth_blob);
1857                                 return status;
1858                         }
1859                         break;
1860
1861                 case PIPE_AUTH_LEVEL_INTEGRITY:
1862                         /* Data is signed. */
1863                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1864                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1865                                         data_and_pad_len,
1866                                         (unsigned char *)prs_data_p(outgoing_pdu),
1867                                         (size_t)prs_offset(outgoing_pdu),
1868                                         &auth_blob);
1869                         if (!NT_STATUS_IS_OK(status)) {
1870                                 data_blob_free(&auth_blob);
1871                                 return status;
1872                         }
1873                         break;
1874
1875                 default:
1876                         /* Can't happen. */
1877                         smb_panic("bad auth level");
1878                         /* Notreached. */
1879                         return NT_STATUS_INVALID_PARAMETER;
1880         }
1881
1882         /* Finally marshall the blob. */
1883
1884         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1885                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1886                         (unsigned int)NTLMSSP_SIG_SIZE));
1887                 data_blob_free(&auth_blob);
1888                 return NT_STATUS_NO_MEMORY;
1889         }
1890
1891         data_blob_free(&auth_blob);
1892         return NT_STATUS_OK;
1893 }
1894
1895 /*******************************************************************
1896  Create and add the schannel sign/seal auth header and data.
1897  ********************************************************************/
1898
1899 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1900                                         RPC_HDR *phdr,
1901                                         uint32 ss_padding_len,
1902                                         prs_struct *outgoing_pdu)
1903 {
1904         RPC_HDR_AUTH auth_info;
1905         RPC_AUTH_SCHANNEL_CHK verf;
1906         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1907         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1908         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1909
1910         if (!sas) {
1911                 return NT_STATUS_INVALID_PARAMETER;
1912         }
1913
1914         /* Init and marshall the auth header. */
1915         init_rpc_hdr_auth(&auth_info,
1916                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1917                         cli->auth->auth_level,
1918                         ss_padding_len,
1919                         1 /* context id. */);
1920
1921         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1922                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1923                 return NT_STATUS_NO_MEMORY;
1924         }
1925
1926         switch (cli->auth->auth_level) {
1927                 case PIPE_AUTH_LEVEL_PRIVACY:
1928                 case PIPE_AUTH_LEVEL_INTEGRITY:
1929                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1930                                 sas->seq_num));
1931
1932                         schannel_encode(sas,
1933                                         cli->auth->auth_level,
1934                                         SENDER_IS_INITIATOR,
1935                                         &verf,
1936                                         data_p,
1937                                         data_and_pad_len);
1938
1939                         sas->seq_num++;
1940                         break;
1941
1942                 default:
1943                         /* Can't happen. */
1944                         smb_panic("bad auth level");
1945                         /* Notreached. */
1946                         return NT_STATUS_INVALID_PARAMETER;
1947         }
1948
1949         /* Finally marshall the blob. */
1950         smb_io_rpc_auth_schannel_chk("",
1951                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1952                         &verf,
1953                         outgoing_pdu,
1954                         0);
1955
1956         return NT_STATUS_OK;
1957 }
1958
1959 /*******************************************************************
1960  Calculate how much data we're going to send in this packet, also
1961  work out any sign/seal padding length.
1962  ********************************************************************/
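/*
 * Worked example (a sketch, assuming the usual sizes: 16 byte RPC_HDR,
 * 8 byte RPC_HDR_REQ, 8 byte RPC_HDR_AUTH, 16 byte NTLMSSP signature,
 * max_xmit_frag of 4280):
 *
 *	NTLMSSP sign/seal: data_space = 4280 - 16 - 8 - 8 - 16 = 4232.
 *	A full fragment then carries 4232 data bytes (already 8-aligned,
 *	so no padding) and frag_len = 16 + 8 + 4232 + 8 + 16 = 4280.
 *	Unauthenticated:   data_space = 4280 - 16 - 8 = 4256.
 */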
1963
1964 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1965                                         uint32 data_left,
1966                                         uint16 *p_frag_len,
1967                                         uint16 *p_auth_len,
1968                                         uint32 *p_ss_padding)
1969 {
1970         uint32 data_space, data_len;
1971
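	/*
	 * DEVELOPER builds randomly shrink the amount of data we are
	 * willing to send per fragment, to exercise the multi-fragment
	 * request path.
	 */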
1972 #ifdef DEVELOPER
1973         if ((data_left > 0) && (sys_random() % 2)) {
1974                 data_left = MAX(data_left/2, 1);
1975         }
1976 #endif
1977
1978         switch (cli->auth->auth_level) {
1979                 case PIPE_AUTH_LEVEL_NONE:
1980                 case PIPE_AUTH_LEVEL_CONNECT:
1981                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1982                         data_len = MIN(data_space, data_left);
1983                         *p_ss_padding = 0;
1984                         *p_auth_len = 0;
1985                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1986                         return data_len;
1987
1988                 case PIPE_AUTH_LEVEL_INTEGRITY:
1989                 case PIPE_AUTH_LEVEL_PRIVACY:
1990                         /* Treat the same for all authenticated rpc requests. */
1991                         switch(cli->auth->auth_type) {
1992                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1993                                 case PIPE_AUTH_TYPE_NTLMSSP:
1994                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1995                                         break;
1996                                 case PIPE_AUTH_TYPE_SCHANNEL:
1997                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1998                                         break;
1999                                 default:
2000                                         smb_panic("bad auth type");
2001                                         break;
2002                         }
2003
2004                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2005                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2006
2007                         data_len = MIN(data_space, data_left);
2008                         *p_ss_padding = 0;
2009                         if (data_len % 8) {
2010                                 *p_ss_padding = 8 - (data_len % 8);
2011                         }
2012                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2013                                         data_len + *p_ss_padding +              /* data plus padding. */
2014                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2015                         return data_len;
2016
2017                 default:
2018                         smb_panic("bad auth level");
2019                         /* Notreached. */
2020                         return 0;
2021         }
2022 }
2023
2024 /*******************************************************************
2025  External interface.
2026  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2027  Reply is NDR encoded in out_data. Splits the data stream into RPC PDUs
2028  and deals with signing/sealing details.
2029  ********************************************************************/
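/*
 * Each outgoing request fragment built by prepare_next_frag() looks
 * like this on the wire (sketch):
 *
 *	RPC_HDR | RPC_HDR_REQ | data | sign/seal pad |
 *	[ RPC_HDR_AUTH | auth signature ]
 *
 * The bracketed auth trailer is only present for authenticated
 * connections (NTLMSSP or schannel here).
 */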
2030
2031 struct rpc_api_pipe_req_state {
2032         struct event_context *ev;
2033         struct rpc_pipe_client *cli;
2034         uint8_t op_num;
2035         uint32_t call_id;
2036         prs_struct *req_data;
2037         uint32_t req_data_sent;
2038         prs_struct outgoing_frag;
2039         prs_struct reply_pdu;
2040 };
2041
2042 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2043 {
2044         prs_mem_free(&s->outgoing_frag);
2045         prs_mem_free(&s->reply_pdu);
2046         return 0;
2047 }
2048
2049 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2050 static void rpc_api_pipe_req_done(struct async_req *subreq);
2051 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2052                                   bool *is_last_frag);
2053
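/*
 * Send path in a nutshell: every fragment except the last one is
 * written with rpc_write_send() and we loop via
 * rpc_api_pipe_req_write_done(); the final fragment goes through
 * rpc_api_pipe_send(), which also collects and reassembles the
 * server's response into reply_pdu.
 */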
2054 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2055                                         struct event_context *ev,
2056                                         struct rpc_pipe_client *cli,
2057                                         uint8_t op_num,
2058                                         prs_struct *req_data)
2059 {
2060         struct async_req *result, *subreq;
2061         struct tevent_req *subreq2;
2062         struct rpc_api_pipe_req_state *state;
2063         NTSTATUS status;
2064         bool is_last_frag;
2065
2066         if (!async_req_setup(mem_ctx, &result, &state,
2067                              struct rpc_api_pipe_req_state)) {
2068                 return NULL;
2069         }
2070         state->ev = ev;
2071         state->cli = cli;
2072         state->op_num = op_num;
2073         state->req_data = req_data;
2074         state->req_data_sent = 0;
2075         state->call_id = get_rpc_call_id();
2076
2077         if (cli->max_xmit_frag
2078             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2079                 /* Server is screwed up ! */
2080                 status = NT_STATUS_INVALID_PARAMETER;
2081                 goto post_status;
2082         }
2083
2084         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2085
2086         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2087                       state, MARSHALL)) {
2088                 status = NT_STATUS_NO_MEMORY;
2089                 goto post_status;
2090         }
2091
2092         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2093
2094         status = prepare_next_frag(state, &is_last_frag);
2095         if (!NT_STATUS_IS_OK(status)) {
2096                 goto post_status;
2097         }
2098
2099         if (is_last_frag) {
2100                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2101                                            &state->outgoing_frag,
2102                                            RPC_RESPONSE);
2103                 if (subreq == NULL) {
2104                         status = NT_STATUS_NO_MEMORY;
2105                         goto post_status;
2106                 }
2107                 subreq->async.fn = rpc_api_pipe_req_done;
2108                 subreq->async.priv = result;
2109         } else {
2110                 subreq2 = rpc_write_send(
2111                         state, ev, cli->transport,
2112                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2113                         prs_offset(&state->outgoing_frag));
2114                 if (subreq2 == NULL) {
2115                         status = NT_STATUS_NO_MEMORY;
2116                         goto post_status;
2117                 }
2118                 tevent_req_set_callback(subreq2, rpc_api_pipe_req_write_done,
2119                                         result);
2120         }
2121         return result;
2122
2123  post_status:
2124         if (async_post_ntstatus(result, ev, status)) {
2125                 return result;
2126         }
2127         TALLOC_FREE(result);
2128         return NULL;
2129 }
2130
2131 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2132                                   bool *is_last_frag)
2133 {
2134         RPC_HDR hdr;
2135         RPC_HDR_REQ hdr_req;
2136         uint32_t data_sent_thistime;
2137         uint16_t auth_len;
2138         uint16_t frag_len;
2139         uint8_t flags = 0;
2140         uint32_t ss_padding;
2141         uint32_t data_left;
2142         char pad[8] = { 0, };
2143         NTSTATUS status;
2144
2145         data_left = prs_offset(state->req_data) - state->req_data_sent;
2146
2147         data_sent_thistime = calculate_data_len_tosend(
2148                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2149
2150         if (state->req_data_sent == 0) {
2151                 flags = RPC_FLG_FIRST;
2152         }
2153
2154         if (data_sent_thistime == data_left) {
2155                 flags |= RPC_FLG_LAST;
2156         }
2157
2158         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2159                 return NT_STATUS_NO_MEMORY;
2160         }
2161
2162         /* Create and marshall the header and request header. */
2163         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2164                      auth_len);
2165
2166         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2167                 return NT_STATUS_NO_MEMORY;
2168         }
2169
2170         /* Create the rpc request RPC_HDR_REQ */
2171         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2172                          state->op_num);
2173
2174         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2175                                 &state->outgoing_frag, 0)) {
2176                 return NT_STATUS_NO_MEMORY;
2177         }
2178
2179         /* Copy in the data, plus any ss padding. */
2180         if (!prs_append_some_prs_data(&state->outgoing_frag,
2181                                       state->req_data, state->req_data_sent,
2182                                       data_sent_thistime)) {
2183                 return NT_STATUS_NO_MEMORY;
2184         }
2185
2186         /* Copy the sign/seal padding data. */
2187         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2188                 return NT_STATUS_NO_MEMORY;
2189         }
2190
2191         /* Generate any auth sign/seal and add the auth footer. */
2192         switch (state->cli->auth->auth_type) {
2193         case PIPE_AUTH_TYPE_NONE:
2194                 status = NT_STATUS_OK;
2195                 break;
2196         case PIPE_AUTH_TYPE_NTLMSSP:
2197         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2198                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2199                                                  &state->outgoing_frag);
2200                 break;
2201         case PIPE_AUTH_TYPE_SCHANNEL:
2202                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2203                                                   &state->outgoing_frag);
2204                 break;
2205         default:
2206                 status = NT_STATUS_INVALID_PARAMETER;
2207                 break;
2208         }
2209
2210         state->req_data_sent += data_sent_thistime;
2211         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2212
2213         return status;
2214 }
2215
2216 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
2217 {
2218         struct async_req *req = tevent_req_callback_data(
2219                 subreq, struct async_req);
2220         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2221                 req->private_data, struct rpc_api_pipe_req_state);
2222         struct async_req *subreq2;
2223         NTSTATUS status;
2224         bool is_last_frag;
2225
2226         status = rpc_write_recv(subreq);
2227         TALLOC_FREE(subreq);
2228         if (!NT_STATUS_IS_OK(status)) {
2229                 async_req_nterror(req, status);
2230                 return;
2231         }
2232
2233         status = prepare_next_frag(state, &is_last_frag);
2234         if (!NT_STATUS_IS_OK(status)) {
2235                 async_req_nterror(req, status);
2236                 return;
2237         }
2238
2239         if (is_last_frag) {
2240                 subreq2 = rpc_api_pipe_send(state, state->ev, state->cli,
2241                                            &state->outgoing_frag,
2242                                            RPC_RESPONSE);
2243                 if (async_req_nomem(subreq2, req)) {
2244                         return;
2245                 }
2246                 subreq2->async.fn = rpc_api_pipe_req_done;
2247                 subreq2->async.priv = req;
2248         } else {
2249                 subreq = rpc_write_send(
2250                         state, state->ev,
2251                         state->cli->transport,
2252                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2253                         prs_offset(&state->outgoing_frag));
2254                 if (async_req_nomem(subreq, req)) {
2255                         return;
2256                 }
2257                 tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
2258                                         req);
2259         }
2260 }
2261
2262 static void rpc_api_pipe_req_done(struct async_req *subreq)
2263 {
2264         struct async_req *req = talloc_get_type_abort(
2265                 subreq->async.priv, struct async_req);
2266         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2267                 req->private_data, struct rpc_api_pipe_req_state);
2268         NTSTATUS status;
2269
2270         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2271         TALLOC_FREE(subreq);
2272         if (!NT_STATUS_IS_OK(status)) {
2273                 async_req_nterror(req, status);
2274                 return;
2275         }
2276         async_req_done(req);
2277 }
2278
2279 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2280                                prs_struct *reply_pdu)
2281 {
2282         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2283                 req->private_data, struct rpc_api_pipe_req_state);
2284         NTSTATUS status;
2285
2286         if (async_req_is_nterror(req, &status)) {
2287                 /*
2288                  * We always have to initialize to reply pdu, even if there is
2289                  * We always have to initialize the reply pdu, even if there is
2290                  */
2291                 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2292                 return status;
2293         }
2294
2295         *reply_pdu = state->reply_pdu;
2296         reply_pdu->mem_ctx = mem_ctx;
2297
2298         /*
2299          * Prevent state->reply_pdu from being freed in
2300          * rpc_api_pipe_req_state_destructor()
2301          */
2302         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2303
2304         return NT_STATUS_OK;
2305 }
2306
2307 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2308                         uint8 op_num,
2309                         prs_struct *in_data,
2310                         prs_struct *out_data)
2311 {
2312         TALLOC_CTX *frame = talloc_stackframe();
2313         struct event_context *ev;
2314         struct async_req *req;
2315         NTSTATUS status = NT_STATUS_NO_MEMORY;
2316
2317         ev = event_context_init(frame);
2318         if (ev == NULL) {
2319                 goto fail;
2320         }
2321
2322         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2323         if (req == NULL) {
2324                 goto fail;
2325         }
2326
2327         while (req->state < ASYNC_REQ_DONE) {
2328                 event_loop_once(ev);
2329         }
2330
2331         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2332  fail:
2333         TALLOC_FREE(frame);
2334         return status;
2335 }
2336
2337 #if 0
2338 /****************************************************************************
2339  Set the handle state.
2340 ****************************************************************************/
2341
2342 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2343                                    const char *pipe_name, uint16 device_state)
2344 {
2345         bool state_set = False;
2346         char param[2];
2347         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2348         char *rparam = NULL;
2349         char *rdata = NULL;
2350         uint32 rparam_len, rdata_len;
2351
2352         if (pipe_name == NULL)
2353                 return False;
2354
2355         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2356                  cli->fnum, pipe_name, device_state));
2357
2358         /* create parameters: device state */
2359         SSVAL(param, 0, device_state);
2360
2361         /* create setup parameters. */
2362         setup[0] = 0x0001; 
2363         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2364
2365         /* send the data on \PIPE\ */
2366         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2367                     setup, 2, 0,                /* setup, length, max */
2368                     param, 2, 0,                /* param, length, max */
2369                     NULL, 0, 1024,              /* data, length, max */
2370                     &rparam, &rparam_len,        /* return param, length */
2371                     &rdata, &rdata_len))         /* return data, length */
2372         {
2373                 DEBUG(5, ("Set Handle state: return OK\n"));
2374                 state_set = True;
2375         }
2376
2377         SAFE_FREE(rparam);
2378         SAFE_FREE(rdata);
2379
2380         return state_set;
2381 }
2382 #endif
2383
2384 /****************************************************************************
2385  Check the rpc bind acknowledge response.
2386 ****************************************************************************/
2387
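/*
 * Note that only a transfer syntax mismatch makes this fail; a non-zero
 * result in the BIND-ACK is merely logged at debug level 2.
 */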
2388 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2389 {
2390         if ( hdr_ba->addr.len == 0) {
2391                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2392         }
2393
2394         /* check the transfer syntax */
2395         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2396              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2397                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2398                 return False;
2399         }
2400
2401         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2402                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2403                           hdr_ba->res.num_results, hdr_ba->res.reason));
2404         }
2405
2406         DEBUG(5,("check_bind_response: accepted!\n"));
2407         return True;
2408 }
2409
2410 /*******************************************************************
2411  Creates a DCE/RPC bind authentication response.
2412  This is the packet that is sent back to the server once we
2413  have received a BIND-ACK, to finish the third leg of
2414  the authentication handshake.
2415  ********************************************************************/
2416
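/*
 * The resulting AUTH3 PDU is laid out as (sketch):
 *
 *	RPC_HDR | 4 pad bytes | RPC_HDR_AUTH | auth blob
 *
 * frag_len and auth_len in the header below are sized accordingly.
 */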
2417 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2418                                 uint32 rpc_call_id,
2419                                 enum pipe_auth_type auth_type,
2420                                 enum pipe_auth_level auth_level,
2421                                 DATA_BLOB *pauth_blob,
2422                                 prs_struct *rpc_out)
2423 {
2424         RPC_HDR hdr;
2425         RPC_HDR_AUTH hdr_auth;
2426         uint32 pad = 0;
2427
2428         /* Create the request RPC_HDR */
2429         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2430                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2431                      pauth_blob->length );
2432
2433         /* Marshall it. */
2434         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2435                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2436                 return NT_STATUS_NO_MEMORY;
2437         }
2438
2439         /*
2440                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2441                 about padding - shouldn't this pad to length 8 ? JRA.
2442         */
2443
2444         /* 4 bytes padding. */
2445         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2446                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2447                 return NT_STATUS_NO_MEMORY;
2448         }
2449
2450         /* Create the request RPC_HDR_AUTH */
2451         init_rpc_hdr_auth(&hdr_auth,
2452                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2453                         auth_level, 0, 1);
2454
2455         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2456                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2457                 return NT_STATUS_NO_MEMORY;
2458         }
2459
2460         /*
2461          * Append the auth data to the outgoing buffer.
2462          */
2463
2464         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2465                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2466                 return NT_STATUS_NO_MEMORY;
2467         }
2468
2469         return NT_STATUS_OK;
2470 }
2471
2472 /*******************************************************************
2473  Creates a DCE/RPC alter context authentication request which
2474  may contain an SPNEGO auth blob.
2475  ********************************************************************/
2476
2477 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2478                                         const RPC_IFACE *abstract,
2479                                         const RPC_IFACE *transfer,
2480                                         enum pipe_auth_level auth_level,
2481                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2482                                         prs_struct *rpc_out)
2483 {
2484         RPC_HDR_AUTH hdr_auth;
2485         prs_struct auth_info;
2486         NTSTATUS ret = NT_STATUS_OK;
2487
2488         ZERO_STRUCT(hdr_auth);
2489         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2490                 return NT_STATUS_NO_MEMORY;
2491
2492         /* We may change the pad length before marshalling. */
2493         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2494
2495         if (pauth_blob->length) {
2496                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2497                         prs_mem_free(&auth_info);
2498                         return NT_STATUS_NO_MEMORY;
2499                 }
2500         }
2501
2502         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2503                                                 rpc_out, 
2504                                                 rpc_call_id,
2505                                                 abstract,
2506                                                 transfer,
2507                                                 &hdr_auth,
2508                                                 &auth_info);
2509         prs_mem_free(&auth_info);
2510         return ret;
2511 }
2512
2513 /****************************************************************************
2514  Do an rpc bind.
2515 ****************************************************************************/
2516
2517 struct rpc_pipe_bind_state {
2518         struct event_context *ev;
2519         struct rpc_pipe_client *cli;
2520         prs_struct rpc_out;
2521         uint32_t rpc_call_id;
2522 };
2523
2524 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2525 {
2526         prs_mem_free(&state->rpc_out);
2527         return 0;
2528 }
2529
2530 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2531 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2532                                            struct rpc_pipe_bind_state *state,
2533                                            struct rpc_hdr_info *phdr,
2534                                            prs_struct *reply_pdu);
2535 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2536 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2537                                                     struct rpc_pipe_bind_state *state,
2538                                                     struct rpc_hdr_info *phdr,
2539                                                     prs_struct *reply_pdu);
2540 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2541
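/*
 * Bind sequence implemented below: send the bind PDU and wait for the
 * BIND-ACK in rpc_pipe_bind_step_one_done(). Depending on the auth type
 * we are then either done (NONE, SCHANNEL), send a one-way AUTH3 PDU
 * (NTLMSSP), or run an alter context exchange (SPNEGO_NTLMSSP).
 */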
2542 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2543                                      struct event_context *ev,
2544                                      struct rpc_pipe_client *cli,
2545                                      struct cli_pipe_auth_data *auth)
2546 {
2547         struct async_req *result, *subreq;
2548         struct rpc_pipe_bind_state *state;
2549         NTSTATUS status;
2550
2551         if (!async_req_setup(mem_ctx, &result, &state,
2552                              struct rpc_pipe_bind_state)) {
2553                 return NULL;
2554         }
2555
2556         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2557                 rpccli_pipe_txt(debug_ctx(), cli),
2558                 (unsigned int)auth->auth_type,
2559                 (unsigned int)auth->auth_level ));
2560
2561         state->ev = ev;
2562         state->cli = cli;
2563         state->rpc_call_id = get_rpc_call_id();
2564
2565         prs_init_empty(&state->rpc_out, state, MARSHALL);
2566         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2567
2568         cli->auth = talloc_move(cli, &auth);
2569
2570         /* Marshall the outgoing data. */
2571         status = create_rpc_bind_req(cli, &state->rpc_out,
2572                                      state->rpc_call_id,
2573                                      &cli->abstract_syntax,
2574                                      &cli->transfer_syntax,
2575                                      cli->auth->auth_type,
2576                                      cli->auth->auth_level);
2577
2578         if (!NT_STATUS_IS_OK(status)) {
2579                 goto post_status;
2580         }
2581
2582         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2583                                    RPC_BINDACK);
2584         if (subreq == NULL) {
2585                 status = NT_STATUS_NO_MEMORY;
2586                 goto post_status;
2587         }
2588         subreq->async.fn = rpc_pipe_bind_step_one_done;
2589         subreq->async.priv = result;
2590         return result;
2591
2592  post_status:
2593         if (async_post_ntstatus(result, ev, status)) {
2594                 return result;
2595         }
2596         TALLOC_FREE(result);
2597         return NULL;
2598 }
2599
2600 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2601 {
2602         struct async_req *req = talloc_get_type_abort(
2603                 subreq->async.priv, struct async_req);
2604         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2605                 req->private_data, struct rpc_pipe_bind_state);
2606         prs_struct reply_pdu;
2607         struct rpc_hdr_info hdr;
2608         struct rpc_hdr_ba_info hdr_ba;
2609         NTSTATUS status;
2610
2611         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2612         TALLOC_FREE(subreq);
2613         if (!NT_STATUS_IS_OK(status)) {
2614                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2615                           rpccli_pipe_txt(debug_ctx(), state->cli),
2616                           nt_errstr(status)));
2617                 async_req_nterror(req, status);
2618                 return;
2619         }
2620
2621         /* Unmarshall the RPC header */
2622         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2623                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2624                 prs_mem_free(&reply_pdu);
2625                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2626                 return;
2627         }
2628
2629         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2630                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2631                           "RPC_HDR_BA.\n"));
2632                 prs_mem_free(&reply_pdu);
2633                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2634                 return;
2635         }
2636
2637         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2638                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2639                 prs_mem_free(&reply_pdu);
2640                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2641                 return;
2642         }
2643
2644         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2645         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2646
2647         /*
2648          * For authenticated binds we may need to do 3 or 4 leg binds.
2649          */
2650
2651         switch(state->cli->auth->auth_type) {
2652
2653         case PIPE_AUTH_TYPE_NONE:
2654         case PIPE_AUTH_TYPE_SCHANNEL:
2655                 /* Bind complete. */
2656                 prs_mem_free(&reply_pdu);
2657                 async_req_done(req);
2658                 break;
2659
2660         case PIPE_AUTH_TYPE_NTLMSSP:
2661                 /* Need to send AUTH3 packet - no reply. */
2662                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2663                                                     &reply_pdu);
2664                 prs_mem_free(&reply_pdu);
2665                 if (!NT_STATUS_IS_OK(status)) {
2666                         async_req_nterror(req, status);
2667                 }
2668                 break;
2669
2670         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2671                 /* Need to send alter context request and reply. */
2672                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2673                                                              &reply_pdu);
2674                 prs_mem_free(&reply_pdu);
2675                 if (!NT_STATUS_IS_OK(status)) {
2676                         async_req_nterror(req, status);
2677                 }
2678                 break;
2679
2680         case PIPE_AUTH_TYPE_KRB5:
2681                 /* */
2682                 /* KRB5 is not implemented in this async bind path yet -
                        fall through to the error below. */
2683         default:
2684                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2685                          (unsigned int)state->cli->auth->auth_type));
2686                 prs_mem_free(&reply_pdu);
2687                 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2688         }
2689 }
2690
2691 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2692                                            struct rpc_pipe_bind_state *state,
2693                                            struct rpc_hdr_info *phdr,
2694                                            prs_struct *reply_pdu)
2695 {
2696         DATA_BLOB server_response = data_blob_null;
2697         DATA_BLOB client_reply = data_blob_null;
2698         struct rpc_hdr_auth_info hdr_auth;
2699         struct tevent_req *subreq;
2700         NTSTATUS status;
2701
2702         if ((phdr->auth_len == 0)
2703             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2704                 return NT_STATUS_INVALID_PARAMETER;
2705         }
2706
2707         if (!prs_set_offset(
2708                     reply_pdu,
2709                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2710                 return NT_STATUS_INVALID_PARAMETER;
2711         }
2712
2713         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2714                 return NT_STATUS_INVALID_PARAMETER;
2715         }
2716
2717         /* TODO - check auth_type/auth_level match. */
2718
2719         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2720         prs_copy_data_out((char *)server_response.data, reply_pdu,
2721                           phdr->auth_len);
2722
2723         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2724                                 server_response, &client_reply);
2725
2726         if (!NT_STATUS_IS_OK(status)) {
2727                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2728                           "blob failed: %s.\n", nt_errstr(status)));
2729                 return status;
2730         }
2731
2732         prs_init_empty(&state->rpc_out, state, MARSHALL);
2733
2734         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2735                                        state->cli->auth->auth_type,
2736                                        state->cli->auth->auth_level,
2737                                        &client_reply, &state->rpc_out);
2738         data_blob_free(&client_reply);
2739
2740         if (!NT_STATUS_IS_OK(status)) {
2741                 return status;
2742         }
2743
2744         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2745                                 (uint8_t *)prs_data_p(&state->rpc_out),
2746                                 prs_offset(&state->rpc_out));
2747         if (subreq == NULL) {
2748                 return NT_STATUS_NO_MEMORY;
2749         }
2750         tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2751         return NT_STATUS_OK;
2752 }
2753
2754 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2755 {
2756         struct async_req *req = tevent_req_callback_data(
2757                 subreq, struct async_req);
2758         NTSTATUS status;
2759
2760         status = rpc_write_recv(subreq);
2761         TALLOC_FREE(subreq);
2762         if (!NT_STATUS_IS_OK(status)) {
2763                 async_req_nterror(req, status);
2764                 return;
2765         }
2766         async_req_done(req);
2767 }
2768
2769 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2770                                                     struct rpc_pipe_bind_state *state,
2771                                                     struct rpc_hdr_info *phdr,
2772                                                     prs_struct *reply_pdu)
2773 {
2774         DATA_BLOB server_spnego_response = data_blob_null;
2775         DATA_BLOB server_ntlm_response = data_blob_null;
2776         DATA_BLOB client_reply = data_blob_null;
2777         DATA_BLOB tmp_blob = data_blob_null;
2778         RPC_HDR_AUTH hdr_auth;
2779         struct async_req *subreq;
2780         NTSTATUS status;
2781
2782         if ((phdr->auth_len == 0)
2783             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2784                 return NT_STATUS_INVALID_PARAMETER;
2785         }
2786
2787         /* Process the returned NTLMSSP blob first. */
2788         if (!prs_set_offset(
2789                     reply_pdu,
2790                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2791                 return NT_STATUS_INVALID_PARAMETER;
2792         }
2793
2794         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2795                 return NT_STATUS_INVALID_PARAMETER;
2796         }
2797
2798         server_spnego_response = data_blob(NULL, phdr->auth_len);
2799         prs_copy_data_out((char *)server_spnego_response.data,
2800                           reply_pdu, phdr->auth_len);
2801
2802         /*
2803          * The server might give us back two challenges - tmp_blob is for the
2804          * second.
2805          */
2806         if (!spnego_parse_challenge(server_spnego_response,
2807                                     &server_ntlm_response, &tmp_blob)) {
2808                 data_blob_free(&server_spnego_response);
2809                 data_blob_free(&server_ntlm_response);
2810                 data_blob_free(&tmp_blob);
2811                 return NT_STATUS_INVALID_PARAMETER;
2812         }
2813
2814         /* We're finished with the server spnego response and the tmp_blob. */
2815         data_blob_free(&server_spnego_response);
2816         data_blob_free(&tmp_blob);
2817
2818         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2819                                 server_ntlm_response, &client_reply);
2820
2821         /* Finished with the server_ntlm response */
2822         data_blob_free(&server_ntlm_response);
2823
2824         if (!NT_STATUS_IS_OK(status)) {
2825                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2826                           "using server blob failed.\n"));
2827                 data_blob_free(&client_reply);
2828                 return status;
2829         }
2830
2831         /* SPNEGO wrap the client reply. */
2832         tmp_blob = spnego_gen_auth(client_reply);
2833         data_blob_free(&client_reply);
2834         client_reply = tmp_blob;
2835         tmp_blob = data_blob_null;
2836
2837         /* Now prepare the alter context pdu. */
2838         prs_init_empty(&state->rpc_out, state, MARSHALL);
2839
2840         status = create_rpc_alter_context(state->rpc_call_id,
2841                                           &state->cli->abstract_syntax,
2842                                           &state->cli->transfer_syntax,
2843                                           state->cli->auth->auth_level,
2844                                           &client_reply,
2845                                           &state->rpc_out);
2846         data_blob_free(&client_reply);
2847
2848         if (!NT_STATUS_IS_OK(status)) {
2849                 return status;
2850         }
2851
2852         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2853                                    &state->rpc_out, RPC_ALTCONTRESP);
2854         if (subreq == NULL) {
2855                 return NT_STATUS_NO_MEMORY;
2856         }
2857         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2858         subreq->async.priv = req;
2859         return NT_STATUS_OK;
2860 }
2861
2862 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2863 {
2864         struct async_req *req = talloc_get_type_abort(
2865                 subreq->async.priv, struct async_req);
2866         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2867                 req->private_data, struct rpc_pipe_bind_state);
2868         DATA_BLOB server_spnego_response = data_blob_null;
2869         DATA_BLOB tmp_blob = data_blob_null;
2870         prs_struct reply_pdu;
2871         struct rpc_hdr_info hdr;
2872         struct rpc_hdr_auth_info hdr_auth;
2873         NTSTATUS status;
2874
2875         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2876         TALLOC_FREE(subreq);
2877         if (!NT_STATUS_IS_OK(status)) {
2878                 async_req_nterror(req, status);
2879                 return;
2880         }
2881
2882         /* Get the auth blob from the reply. */
2883         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2884                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2885                           "unmarshall RPC_HDR.\n"));
2886                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2887                 return;
2888         }
2889
2890         if (!prs_set_offset(
2891                     &reply_pdu,
2892                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2893                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2894                 return;
2895         }
2896
2897         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2898                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2899                 return;
2900         }
2901
2902         server_spnego_response = data_blob(NULL, hdr.auth_len);
2903         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2904                           hdr.auth_len);
2905
2906         /* Check we got a valid auth response. */
2907         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2908                                         OID_NTLMSSP, &tmp_blob)) {
2909                 data_blob_free(&server_spnego_response);
2910                 data_blob_free(&tmp_blob);
2911                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2912                 return;
2913         }
2914
2915         data_blob_free(&server_spnego_response);
2916         data_blob_free(&tmp_blob);
2917
2918         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2919                  "%s succeeded.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2920         async_req_done(req);
2921 }
2922
2923 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2924 {
2925         return async_req_simple_recv_ntstatus(req);
2926 }
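/*
 * Usage sketch for the asynchronous bind pair above (illustrative only;
 * my_bind_done is a placeholder name, not part of this file).  A caller
 * hooks a completion function into the returned request the same way the
 * subrequests in this file do:
 *
 *      struct async_req *req;
 *
 *      req = rpc_pipe_bind_send(mem_ctx, ev, cli, auth);
 *      if (req == NULL) {
 *              return NT_STATUS_NO_MEMORY;
 *      }
 *      req->async.fn = my_bind_done;
 *
 * and collects the result in the callback:
 *
 *      static void my_bind_done(struct async_req *req)
 *      {
 *              NTSTATUS status = rpc_pipe_bind_recv(req);
 *              TALLOC_FREE(req);
 *              DEBUG(10, ("bind returned %s\n", nt_errstr(status)));
 *      }
 *
 * The synchronous rpc_pipe_bind() below shows the alternative pattern of
 * looping on event_loop_once() until req->state reaches ASYNC_REQ_DONE.
 */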
2927
2928 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2929                        struct cli_pipe_auth_data *auth)
2930 {
2931         TALLOC_CTX *frame = talloc_stackframe();
2932         struct event_context *ev;
2933         struct async_req *req;
2934         NTSTATUS status = NT_STATUS_NO_MEMORY;
2935
2936         ev = event_context_init(frame);
2937         if (ev == NULL) {
2938                 goto fail;
2939         }
2940
2941         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2942         if (req == NULL) {
2943                 goto fail;
2944         }
2945
2946         while (req->state < ASYNC_REQ_DONE) {
2947                 event_loop_once(ev);
2948         }
2949
2950         status = rpc_pipe_bind_recv(req);
2951  fail:
2952         TALLOC_FREE(frame);
2953         return status;
2954 }
2955
2956 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2957                                 unsigned int timeout)
2958 {
2959         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2960
2961         if (cli == NULL) {
2962                 return 0;
2963         }
2964         return cli_set_timeout(cli, timeout);
2965 }
2966
2967 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2968 {
2969         struct cli_state *cli;
2970
2971         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2972             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2973                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2974                 return true;
2975         }
2976
2977         cli = rpc_pipe_np_smb_conn(rpc_cli);
2978         if (cli == NULL) {
2979                 return false;
2980         }
2981         E_md4hash(cli->password ? cli->password : "", nt_hash);
2982         return true;
2983 }
2984
2985 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2986                                struct cli_pipe_auth_data **presult)
2987 {
2988         struct cli_pipe_auth_data *result;
2989
2990         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2991         if (result == NULL) {
2992                 return NT_STATUS_NO_MEMORY;
2993         }
2994
2995         result->auth_type = PIPE_AUTH_TYPE_NONE;
2996         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2997
2998         result->user_name = talloc_strdup(result, "");
2999         result->domain = talloc_strdup(result, "");
3000         if ((result->user_name == NULL) || (result->domain == NULL)) {
3001                 TALLOC_FREE(result);
3002                 return NT_STATUS_NO_MEMORY;
3003         }
3004
3005         *presult = result;
3006         return NT_STATUS_OK;
3007 }
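/*
 * Illustrative pairing (a sketch, not a call site in this function): the
 * anonymous auth data is normally handed straight to rpc_pipe_bind(), as
 * rpc_pipe_get_tcp_port() below does for the endpoint mapper pipe:
 *
 *      struct cli_pipe_auth_data *auth;
 *      NTSTATUS status;
 *
 *      status = rpccli_anon_bind_data(rpc_cli, &auth);
 *      if (NT_STATUS_IS_OK(status)) {
 *              status = rpc_pipe_bind(rpc_cli, auth);
 *      }
 *
 * where rpc_cli is an already opened struct rpc_pipe_client.
 */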
3008
3009 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3010 {
3011         ntlmssp_end(&auth->a_u.ntlmssp_state);
3012         return 0;
3013 }
3014
3015 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3016                                   enum pipe_auth_type auth_type,
3017                                   enum pipe_auth_level auth_level,
3018                                   const char *domain,
3019                                   const char *username,
3020                                   const char *password,
3021                                   struct cli_pipe_auth_data **presult)
3022 {
3023         struct cli_pipe_auth_data *result;
3024         NTSTATUS status;
3025
3026         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3027         if (result == NULL) {
3028                 return NT_STATUS_NO_MEMORY;
3029         }
3030
3031         result->auth_type = auth_type;
3032         result->auth_level = auth_level;
3033
3034         result->user_name = talloc_strdup(result, username);
3035         result->domain = talloc_strdup(result, domain);
3036         if ((result->user_name == NULL) || (result->domain == NULL)) {
3037                 status = NT_STATUS_NO_MEMORY;
3038                 goto fail;
3039         }
3040
3041         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3042         if (!NT_STATUS_IS_OK(status)) {
3043                 goto fail;
3044         }
3045
3046         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3047
3048         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3049         if (!NT_STATUS_IS_OK(status)) {
3050                 goto fail;
3051         }
3052
3053         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3054         if (!NT_STATUS_IS_OK(status)) {
3055                 goto fail;
3056         }
3057
3058         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3059         if (!NT_STATUS_IS_OK(status)) {
3060                 goto fail;
3061         }
3062
3063         /*
3064          * Turn off sign+seal to allow selected auth level to turn it back on.
3065          */
3066         result->a_u.ntlmssp_state->neg_flags &=
3067                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3068
3069         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3070                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3071         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3072                 result->a_u.ntlmssp_state->neg_flags
3073                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3074         }
3075
3076         *presult = result;
3077         return NT_STATUS_OK;
3078
3079  fail:
3080         TALLOC_FREE(result);
3081         return status;
3082 }
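/*
 * Sketch of the intended use (the domain, user and password shown are
 * placeholders):
 *
 *      struct cli_pipe_auth_data *auth;
 *      NTSTATUS status;
 *
 *      status = rpccli_ntlmssp_bind_data(rpc_cli,
 *                                        PIPE_AUTH_TYPE_NTLMSSP,
 *                                        PIPE_AUTH_LEVEL_PRIVACY,
 *                                        "EXAMPLEDOM", "exampleuser",
 *                                        "examplepass", &auth);
 *      if (NT_STATUS_IS_OK(status)) {
 *              status = rpc_pipe_bind(rpc_cli, auth);
 *      }
 *
 * cli_rpc_pipe_open_ntlmssp_internal() further down wraps exactly this
 * sequence, including the pipe open.
 */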
3083
3084 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3085                                    enum pipe_auth_level auth_level,
3086                                    const uint8_t sess_key[16],
3087                                    struct cli_pipe_auth_data **presult)
3088 {
3089         struct cli_pipe_auth_data *result;
3090
3091         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3092         if (result == NULL) {
3093                 return NT_STATUS_NO_MEMORY;
3094         }
3095
3096         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3097         result->auth_level = auth_level;
3098
3099         result->user_name = talloc_strdup(result, "");
3100         result->domain = talloc_strdup(result, domain);
3101         if ((result->user_name == NULL) || (result->domain == NULL)) {
3102                 goto fail;
3103         }
3104
3105         result->a_u.schannel_auth = talloc(result,
3106                                            struct schannel_auth_struct);
3107         if (result->a_u.schannel_auth == NULL) {
3108                 goto fail;
3109         }
3110
3111         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3112                sizeof(result->a_u.schannel_auth->sess_key));
3113         result->a_u.schannel_auth->seq_num = 0;
3114
3115         *presult = result;
3116         return NT_STATUS_OK;
3117
3118  fail:
3119         TALLOC_FREE(result);
3120         return NT_STATUS_NO_MEMORY;
3121 }
3122
3123 #ifdef HAVE_KRB5
3124 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3125 {
3126         data_blob_free(&auth->session_key);
3127         return 0;
3128 }
3129 #endif
3130
3131 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3132                                    enum pipe_auth_level auth_level,
3133                                    const char *service_princ,
3134                                    const char *username,
3135                                    const char *password,
3136                                    struct cli_pipe_auth_data **presult)
3137 {
3138 #ifdef HAVE_KRB5
3139         struct cli_pipe_auth_data *result;
3140
3141         if ((username != NULL) && (password != NULL)) {
3142                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3143                 if (ret != 0) {
3144                         return NT_STATUS_ACCESS_DENIED;
3145                 }
3146         }
3147
3148         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3149         if (result == NULL) {
3150                 return NT_STATUS_NO_MEMORY;
3151         }
3152
3153         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3154         result->auth_level = auth_level;
3155
3156         /*
3157          * Username / domain need fixing!
3158          */
3159         result->user_name = talloc_strdup(result, "");
3160         result->domain = talloc_strdup(result, "");
3161         if ((result->user_name == NULL) || (result->domain == NULL)) {
3162                 goto fail;
3163         }
3164
3165         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3166                 result, struct kerberos_auth_struct);
3167         if (result->a_u.kerberos_auth == NULL) {
3168                 goto fail;
3169         }
3170         talloc_set_destructor(result->a_u.kerberos_auth,
3171                               cli_auth_kerberos_data_destructor);
3172
3173         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3174                 result, service_princ);
3175         if (result->a_u.kerberos_auth->service_principal == NULL) {
3176                 goto fail;
3177         }
3178
3179         *presult = result;
3180         return NT_STATUS_OK;
3181
3182  fail:
3183         TALLOC_FREE(result);
3184         return NT_STATUS_NO_MEMORY;
3185 #else
3186         return NT_STATUS_NOT_SUPPORTED;
3187 #endif
3188 }
3189
3190 /**
3191  * Create an rpc pipe client struct, connecting to a tcp port.
3192  */
3193 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3194                                        uint16_t port,
3195                                        const struct ndr_syntax_id *abstract_syntax,
3196                                        struct rpc_pipe_client **presult)
3197 {
3198         struct rpc_pipe_client *result;
3199         struct sockaddr_storage addr;
3200         NTSTATUS status;
3201         int fd;
3202
3203         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3204         if (result == NULL) {
3205                 return NT_STATUS_NO_MEMORY;
3206         }
3207
3208         result->abstract_syntax = *abstract_syntax;
3209         result->transfer_syntax = ndr_transfer_syntax;
3210         result->dispatch = cli_do_rpc_ndr;
3211
3212         result->desthost = talloc_strdup(result, host);
3213         result->srv_name_slash = talloc_asprintf_strupper_m(
3214                 result, "\\\\%s", result->desthost);
3215         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3216                 status = NT_STATUS_NO_MEMORY;
3217                 goto fail;
3218         }
3219
3220         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3221         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3222
3223         if (!resolve_name(host, &addr, 0)) {
3224                 status = NT_STATUS_NOT_FOUND;
3225                 goto fail;
3226         }
3227
3228         status = open_socket_out(&addr, port, 60, &fd);
3229         if (!NT_STATUS_IS_OK(status)) {
3230                 goto fail;
3231         }
3232         set_socket_options(fd, lp_socket_options());
3233
3234         status = rpc_transport_sock_init(result, fd, &result->transport);
3235         if (!NT_STATUS_IS_OK(status)) {
3236                 close(fd);
3237                 goto fail;
3238         }
3239
3240         *presult = result;
3241         return NT_STATUS_OK;
3242
3243  fail:
3244         TALLOC_FREE(result);
3245         return status;
3246 }
3247
3248 /**
3249  * Determine the tcp port on which a dcerpc interface is listening
3250  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3251  * target host.
3252  */
3253 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3254                                       const struct ndr_syntax_id *abstract_syntax,
3255                                       uint16_t *pport)
3256 {
3257         NTSTATUS status;
3258         struct rpc_pipe_client *epm_pipe = NULL;
3259         struct cli_pipe_auth_data *auth = NULL;
3260         struct dcerpc_binding *map_binding = NULL;
3261         struct dcerpc_binding *res_binding = NULL;
3262         struct epm_twr_t *map_tower = NULL;
3263         struct epm_twr_t *res_towers = NULL;
3264         struct policy_handle *entry_handle = NULL;
3265         uint32_t num_towers = 0;
3266         uint32_t max_towers = 1;
3267         struct epm_twr_p_t towers;
3268         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3269
3270         if (pport == NULL) {
3271                 status = NT_STATUS_INVALID_PARAMETER;
3272                 goto done;
3273         }
3274
3275         /* open the connection to the endpoint mapper */
3276         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3277                                         &ndr_table_epmapper.syntax_id,
3278                                         &epm_pipe);
3279
3280         if (!NT_STATUS_IS_OK(status)) {
3281                 goto done;
3282         }
3283
3284         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3285         if (!NT_STATUS_IS_OK(status)) {
3286                 goto done;
3287         }
3288
3289         status = rpc_pipe_bind(epm_pipe, auth);
3290         if (!NT_STATUS_IS_OK(status)) {
3291                 goto done;
3292         }
3293
3294         /* create tower for asking the epmapper */
3295
3296         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3297         if (map_binding == NULL) {
3298                 status = NT_STATUS_NO_MEMORY;
3299                 goto done;
3300         }
3301
3302         map_binding->transport = NCACN_IP_TCP;
3303         map_binding->object = *abstract_syntax;
3304         map_binding->host = host; /* needed? */
3305         map_binding->endpoint = "0"; /* correct? needed? */
3306
3307         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3308         if (map_tower == NULL) {
3309                 status = NT_STATUS_NO_MEMORY;
3310                 goto done;
3311         }
3312
3313         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3314                                             &(map_tower->tower));
3315         if (!NT_STATUS_IS_OK(status)) {
3316                 goto done;
3317         }
3318
3319         /* allocate further parameters for the epm_Map call */
3320
3321         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3322         if (res_towers == NULL) {
3323                 status = NT_STATUS_NO_MEMORY;
3324                 goto done;
3325         }
3326         towers.twr = res_towers;
3327
3328         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3329         if (entry_handle == NULL) {
3330                 status = NT_STATUS_NO_MEMORY;
3331                 goto done;
3332         }
3333
3334         /* ask the endpoint mapper for the port */
3335
3336         status = rpccli_epm_Map(epm_pipe,
3337                                 tmp_ctx,
3338                                 CONST_DISCARD(struct GUID *,
3339                                               &(abstract_syntax->uuid)),
3340                                 map_tower,
3341                                 entry_handle,
3342                                 max_towers,
3343                                 &num_towers,
3344                                 &towers);
3345
3346         if (!NT_STATUS_IS_OK(status)) {
3347                 goto done;
3348         }
3349
3350         if (num_towers != 1) {
3351                 status = NT_STATUS_UNSUCCESSFUL;
3352                 goto done;
3353         }
3354
3355         /* extract the port from the answer */
3356
3357         status = dcerpc_binding_from_tower(tmp_ctx,
3358                                            &(towers.twr->tower),
3359                                            &res_binding);
3360         if (!NT_STATUS_IS_OK(status)) {
3361                 goto done;
3362         }
3363
3364         /* are further checks here necessary? */
3365         if (res_binding->transport != NCACN_IP_TCP) {
3366                 status = NT_STATUS_UNSUCCESSFUL;
3367                 goto done;
3368         }
3369
3370         *pport = (uint16_t)atoi(res_binding->endpoint);
3371
3372 done:
3373         TALLOC_FREE(tmp_ctx);
3374         return status;
3375 }
3376
3377 /**
3378  * Create an rpc pipe client struct, connecting to a host via tcp.
3379  * The port is determined by asking the endpoint mapper on the given
3380  * host.
3381  */
3382 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3383                            const struct ndr_syntax_id *abstract_syntax,
3384                            struct rpc_pipe_client **presult)
3385 {
3386         NTSTATUS status;
3387         uint16_t port = 0;
3388
3389         *presult = NULL;
3390
3391         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3392         if (!NT_STATUS_IS_OK(status)) {
3393                 goto done;
3394         }
3395
3396         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3397                                         abstract_syntax, presult);
3398
3399 done:
3400         return status;
3401 }
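/*
 * Usage sketch (the host name is a placeholder): look up the drsuapi
 * endpoint via the epmapper and connect over ncacn_ip_tcp:
 *
 *      struct rpc_pipe_client *p;
 *      NTSTATUS status;
 *
 *      status = rpc_pipe_open_tcp(talloc_tos(), "dc.example.com",
 *                                 &ndr_table_drsuapi.syntax_id, &p);
 *
 * The returned pipe still needs a bind (rpc_pipe_bind() with suitable
 * auth data) before any calls can be made on it.
 */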
3402
3403 /********************************************************************
3404  Create an rpc pipe client struct, connecting to a unix domain socket.
3405  ********************************************************************/
3406 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3407                                const struct ndr_syntax_id *abstract_syntax,
3408                                struct rpc_pipe_client **presult)
3409 {
3410         struct rpc_pipe_client *result;
3411         struct sockaddr_un addr;
3412         NTSTATUS status;
3413         int fd;
3414
3415         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3416         if (result == NULL) {
3417                 return NT_STATUS_NO_MEMORY;
3418         }
3419
3420         result->abstract_syntax = *abstract_syntax;
3421         result->transfer_syntax = ndr_transfer_syntax;
3422         result->dispatch = cli_do_rpc_ndr;
3423
3424         result->desthost = get_myname(result);
3425         result->srv_name_slash = talloc_asprintf_strupper_m(
3426                 result, "\\\\%s", result->desthost);
3427         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3428                 status = NT_STATUS_NO_MEMORY;
3429                 goto fail;
3430         }
3431
3432         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3433         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3434
3435         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3436         if (fd == -1) {
3437                 status = map_nt_error_from_unix(errno);
3438                 goto fail;
3439         }
3440
3441         ZERO_STRUCT(addr);
3442         addr.sun_family = AF_UNIX;
3443         strlcpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3444
3445         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3446                 status = map_nt_error_from_unix(errno);
3447                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path, strerror(errno)));
3448                 close(fd);
3449                 goto fail;
3450         }
3451
3452         status = rpc_transport_sock_init(result, fd, &result->transport);
3453         if (!NT_STATUS_IS_OK(status)) {
3454                 close(fd);
3455                 goto fail;
3456         }
3457
3458         *presult = result;
3459         return NT_STATUS_OK;
3460
3461  fail:
3462         TALLOC_FREE(result);
3463         return status;
3464 }
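/*
 * Usage sketch (the socket path is a placeholder; the real path depends
 * on the local setup):
 *
 *      struct rpc_pipe_client *p;
 *      struct cli_pipe_auth_data *auth;
 *      NTSTATUS status;
 *
 *      status = rpc_pipe_open_ncalrpc(talloc_tos(), "/tmp/rpc.sock",
 *                                     &ndr_table_epmapper.syntax_id, &p);
 *      if (NT_STATUS_IS_OK(status)) {
 *              status = rpccli_anon_bind_data(p, &auth);
 *      }
 *      if (NT_STATUS_IS_OK(status)) {
 *              status = rpc_pipe_bind(p, auth);
 *      }
 */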
3465
3466 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3467 {
3468         struct cli_state *cli;
3469
3470         cli = rpc_pipe_np_smb_conn(p);
3471         if (cli != NULL) {
3472                 DLIST_REMOVE(cli->pipe_list, p);
3473         }
3474         return 0;
3475 }
3476
3477 /****************************************************************************
3478  Open a named pipe over SMB to a remote server.
3479  *
3480  * CAVEAT CALLER OF THIS FUNCTION:
3481  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3482  *    so be sure that this function is called AFTER any structure (vs pointer)
3483  *    assignment of the cli.  In particular, libsmbclient does structure
3484  *    assignments of cli, which invalidates the data in the returned
3485  *    rpc_pipe_client if this function is called before the structure assignment
3486  *    of cli.
3487  * 
3488  ****************************************************************************/
3489
3490 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3491                                  const struct ndr_syntax_id *abstract_syntax,
3492                                  struct rpc_pipe_client **presult)
3493 {
3494         struct rpc_pipe_client *result;
3495         NTSTATUS status;
3496
3497         /* sanity check to protect against crashes */
3498
3499         if ( !cli ) {
3500                 return NT_STATUS_INVALID_HANDLE;
3501         }
3502
3503         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3504         if (result == NULL) {
3505                 return NT_STATUS_NO_MEMORY;
3506         }
3507
3508         result->abstract_syntax = *abstract_syntax;
3509         result->transfer_syntax = ndr_transfer_syntax;
3510         result->dispatch = cli_do_rpc_ndr;
3511         result->desthost = talloc_strdup(result, cli->desthost);
3512         result->srv_name_slash = talloc_asprintf_strupper_m(
3513                 result, "\\\\%s", result->desthost);
3514
3515         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3516         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3517
3518         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3519                 TALLOC_FREE(result);
3520                 return NT_STATUS_NO_MEMORY;
3521         }
3522
3523         status = rpc_transport_np_init(result, cli, abstract_syntax,
3524                                        &result->transport);
3525         if (!NT_STATUS_IS_OK(status)) {
3526                 TALLOC_FREE(result);
3527                 return status;
3528         }
3529
3530         DLIST_ADD(cli->pipe_list, result);
3531         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3532
3533         *presult = result;
3534         return NT_STATUS_OK;
3535 }
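/*
 * Illustration of the caveat above (sketch only): because the returned
 * rpc_pipe_client stores the cli pointer, any structure assignment of the
 * cli_state must happen before the pipe is opened, e.g.
 *
 *      struct rpc_pipe_client *p;
 *      NTSTATUS status;
 *
 *      status = cli_rpc_pipe_open_noauth(cli,
 *                                        &ndr_table_netlogon.syntax_id,
 *                                        &p);
 *
 * where cli already sits at its final address for the lifetime of p.
 */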
3536
3537 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3538                              struct rpc_cli_smbd_conn *conn,
3539                              const struct ndr_syntax_id *syntax,
3540                              struct rpc_pipe_client **presult)
3541 {
3542         struct rpc_pipe_client *result;
3543         struct cli_pipe_auth_data *auth;
3544         NTSTATUS status;
3545
3546         result = talloc(mem_ctx, struct rpc_pipe_client);
3547         if (result == NULL) {
3548                 return NT_STATUS_NO_MEMORY;
3549         }
3550         result->abstract_syntax = *syntax;
3551         result->transfer_syntax = ndr_transfer_syntax;
3552         result->dispatch = cli_do_rpc_ndr;
3553         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3554         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3555
3556         result->desthost = talloc_strdup(result, global_myname());
3557         result->srv_name_slash = talloc_asprintf_strupper_m(
3558                 result, "\\\\%s", global_myname());
3559         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3560                 TALLOC_FREE(result);
3561                 return NT_STATUS_NO_MEMORY;
3562         }
3563
3564         status = rpc_transport_smbd_init(result, conn, syntax,
3565                                          &result->transport);
3566         if (!NT_STATUS_IS_OK(status)) {
3567                 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3568                           nt_errstr(status)));
3569                 TALLOC_FREE(result);
3570                 return status;
3571         }
3572
3573         status = rpccli_anon_bind_data(result, &auth);
3574         if (!NT_STATUS_IS_OK(status)) {
3575                 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3576                           nt_errstr(status)));
3577                 TALLOC_FREE(result);
3578                 return status;
3579         }
3580
3581         status = rpc_pipe_bind(result, auth);
3582         if (!NT_STATUS_IS_OK(status)) {
3583                 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3584                 TALLOC_FREE(result);
3585                 return status;
3586         }
3587
3588         *presult = result;
3589         return NT_STATUS_OK;
3590 }
3591
3592 /****************************************************************************
3593  Open a pipe to a remote server.
3594  ****************************************************************************/
3595
3596 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3597                                   const struct ndr_syntax_id *interface,
3598                                   struct rpc_pipe_client **presult)
3599 {
3600         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3601                 /*
3602                  * We should have a better way to figure out this drsuapi
3603                  * speciality...
3604                  */
3605                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3606                                          presult);
3607         }
3608
3609         return rpc_pipe_open_np(cli, interface, presult);
3610 }
3611
3612 /****************************************************************************
3613  Open a named pipe to an SMB server and bind anonymously.
3614  ****************************************************************************/
3615
3616 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3617                                   const struct ndr_syntax_id *interface,
3618                                   struct rpc_pipe_client **presult)
3619 {
3620         struct rpc_pipe_client *result;
3621         struct cli_pipe_auth_data *auth;
3622         NTSTATUS status;
3623
3624         status = cli_rpc_pipe_open(cli, interface, &result);
3625         if (!NT_STATUS_IS_OK(status)) {
3626                 return status;
3627         }
3628
3629         status = rpccli_anon_bind_data(result, &auth);
3630         if (!NT_STATUS_IS_OK(status)) {
3631                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3632                           nt_errstr(status)));
3633                 TALLOC_FREE(result);
3634                 return status;
3635         }
3636
3637         /*
3638          * This is a bit of an abstraction violation due to the fact that an
3639          * anonymous bind on an authenticated SMB inherits the user/domain
3640          * from the enclosing SMB creds
3641          */
3642
3643         TALLOC_FREE(auth->user_name);
3644         TALLOC_FREE(auth->domain);
3645
3646         auth->user_name = talloc_strdup(auth, cli->user_name);
3647         auth->domain = talloc_strdup(auth, cli->domain);
3648         auth->user_session_key = data_blob_talloc(auth,
3649                 cli->user_session_key.data,
3650                 cli->user_session_key.length);
3651
3652         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3653                 TALLOC_FREE(result);
3654                 return NT_STATUS_NO_MEMORY;
3655         }
3656
3657         status = rpc_pipe_bind(result, auth);
3658         if (!NT_STATUS_IS_OK(status)) {
3659                 int lvl = 0;
3660                 if (ndr_syntax_id_equal(interface,
3661                                         &ndr_table_dssetup.syntax_id)) {
3662                         /* non AD domains just don't have this pipe, avoid
3663                          * level 0 statement in that case - gd */
3664                         lvl = 3;
3665                 }
3666                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3667                             "%s failed with error %s\n",
3668                             get_pipe_name_from_iface(interface),
3669                             nt_errstr(status) ));
3670                 TALLOC_FREE(result);
3671                 return status;
3672         }
3673
3674         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3675                   "%s and bound anonymously.\n",
3676                   get_pipe_name_from_iface(interface), cli->desthost));
3677
3678         *presult = result;
3679         return NT_STATUS_OK;
3680 }
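/*
 * Usage sketch (illustrative): an anonymous bind is sufficient for pipes
 * such as the endpoint mapper or, against AD domains, dssetup:
 *
 *      struct rpc_pipe_client *p;
 *      NTSTATUS status;
 *
 *      status = cli_rpc_pipe_open_noauth(cli,
 *                                        &ndr_table_dssetup.syntax_id,
 *                                        &p);
 *      if (NT_STATUS_IS_OK(status)) {
 *              rpccli_set_timeout(p, 10000);
 *      }
 *
 * The rpccli_set_timeout() call is optional and only illustrates the
 * helper defined earlier in this file; 10000 is a placeholder value.
 */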
3681
3682 /****************************************************************************
3683  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3684  ****************************************************************************/
3685
3686 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3687                                                    const struct ndr_syntax_id *interface,
3688                                                    enum pipe_auth_type auth_type,
3689                                                    enum pipe_auth_level auth_level,
3690                                                    const char *domain,
3691                                                    const char *username,
3692                                                    const char *password,
3693                                                    struct rpc_pipe_client **presult)
3694 {
3695         struct rpc_pipe_client *result;
3696         struct cli_pipe_auth_data *auth;
3697         NTSTATUS status;
3698
3699         status = cli_rpc_pipe_open(cli, interface, &result);
3700         if (!NT_STATUS_IS_OK(status)) {
3701                 return status;
3702         }
3703
3704         status = rpccli_ntlmssp_bind_data(
3705                 result, auth_type, auth_level, domain, username,
3706                 password, &auth);
3707         if (!NT_STATUS_IS_OK(status)) {
3708                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3709                           nt_errstr(status)));
3710                 goto err;
3711         }
3712
3713         status = rpc_pipe_bind(result, auth);
3714         if (!NT_STATUS_IS_OK(status)) {
3715                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3716                         nt_errstr(status) ));
3717                 goto err;
3718         }
3719
3720         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3721                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3722                   get_pipe_name_from_iface(interface), cli->desthost, domain,
3723                   username ));
3724
3725         *presult = result;
3726         return NT_STATUS_OK;
3727
3728   err:
3729
3730         TALLOC_FREE(result);
3731         return status;
3732 }
3733
3734 /****************************************************************************
3735  External interface.
3736  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3737  ****************************************************************************/
3738
3739 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3740                                    const struct ndr_syntax_id *interface,
3741                                    enum pipe_auth_level auth_level,
3742                                    const char *domain,
3743                                    const char *username,
3744                                    const char *password,
3745                                    struct rpc_pipe_client **presult)
3746 {
3747         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3748                                                 interface,
3749                                                 PIPE_AUTH_TYPE_NTLMSSP,
3750                                                 auth_level,
3751                                                 domain,
3752                                                 username,
3753                                                 password,
3754                                                 presult);
3755 }
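/*
 * Usage sketch (domain, user and password are placeholders):
 *
 *      struct rpc_pipe_client *p;
 *      NTSTATUS status;
 *
 *      status = cli_rpc_pipe_open_ntlmssp(cli,
 *                                         &ndr_table_netlogon.syntax_id,
 *                                         PIPE_AUTH_LEVEL_PRIVACY,
 *                                         "EXAMPLEDOM", "exampleuser",
 *                                         "examplepass", &p);
 *
 * PIPE_AUTH_LEVEL_INTEGRITY would sign only; PRIVACY signs and seals.
 */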
3756
3757 /****************************************************************************
3758  External interface.
3759  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3760  ****************************************************************************/
3761
3762 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3763                                           const struct ndr_syntax_id *interface,
3764                                           enum pipe_auth_level auth_level,
3765                                           const char *domain,
3766                                           const char *username,
3767                                           const char *password,
3768                                           struct rpc_pipe_client **presult)
3769 {
3770         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3771                                                 interface,
3772                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3773                                                 auth_level,
3774                                                 domain,
3775                                                 username,
3776                                                 password,
3777                                                 presult);
3778 }
3779
3780 /****************************************************************************
3781   Get the schannel session key out of an already opened netlogon pipe.
3782  ****************************************************************************/
3783 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3784                                                 struct cli_state *cli,
3785                                                 const char *domain,
3786                                                 uint32 *pneg_flags)
3787 {
3788         uint32 sec_chan_type = 0;
3789         unsigned char machine_pwd[16];
3790         const char *machine_account;
3791         NTSTATUS status;
3792
3793         /* Get the machine account credentials from secrets.tdb. */
3794         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3795                                &sec_chan_type))
3796         {
3797                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3798                         "trust account password for domain '%s'\n",
3799                         domain));
3800                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3801         }
3802
3803         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3804                                         cli->desthost, /* server name */
3805                                         domain,        /* domain */
3806                                         global_myname(), /* client name */
3807                                         machine_account, /* machine account name */
3808                                         machine_pwd,
3809                                         sec_chan_type,
3810                                         pneg_flags);
3811
3812         if (!NT_STATUS_IS_OK(status)) {
3813                 DEBUG(3, ("get_schannel_session_key_common: "
3814                           "rpccli_netlogon_setup_creds failed with result %s "
3815                           "to server %s, domain %s, machine account %s.\n",
3816                           nt_errstr(status), cli->desthost, domain,
3817                           machine_account ));
3818                 return status;
3819         }
3820
3821         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3822                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3823                         cli->desthost));
3824                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3825         }
3826
3827         return NT_STATUS_OK;
3828 }
3829
3830 /****************************************************************************
3831  Open a netlogon pipe and get the schannel session key.
3832  Now exposed to external callers.
3833  ****************************************************************************/
3834
3835
3836 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3837                                   const char *domain,
3838                                   uint32 *pneg_flags,
3839                                   struct rpc_pipe_client **presult)
3840 {
3841         struct rpc_pipe_client *netlogon_pipe = NULL;
3842         NTSTATUS status;
3843
3844         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3845                                           &netlogon_pipe);
3846         if (!NT_STATUS_IS_OK(status)) {
3847                 return status;
3848         }
3849
3850         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3851                                                  pneg_flags);
3852         if (!NT_STATUS_IS_OK(status)) {
3853                 TALLOC_FREE(netlogon_pipe);
3854                 return status;
3855         }
3856
3857         *presult = netlogon_pipe;
3858         return NT_STATUS_OK;
3859 }
3860
3861 /****************************************************************************
3862  External interface.
3863  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3864  using the session key from the supplied dcinfo, signing and sealing
 according to auth_level.
3865  ****************************************************************************/
3866
3867 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3868                                              const struct ndr_syntax_id *interface,
3869                                              enum pipe_auth_level auth_level,
3870                                              const char *domain,
3871                                              const struct dcinfo *pdc,
3872                                              struct rpc_pipe_client **presult)
3873 {
3874         struct rpc_pipe_client *result;
3875         struct cli_pipe_auth_data *auth;
3876         NTSTATUS status;
3877
3878         status = cli_rpc_pipe_open(cli, interface, &result);
3879         if (!NT_STATUS_IS_OK(status)) {
3880                 return status;
3881         }
3882
3883         status = rpccli_schannel_bind_data(result, domain, auth_level,
3884                                            pdc->sess_key, &auth);
3885         if (!NT_STATUS_IS_OK(status)) {
3886                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3887                           nt_errstr(status)));
3888                 TALLOC_FREE(result);
3889                 return status;
3890         }
3891
3892         status = rpc_pipe_bind(result, auth);
3893         if (!NT_STATUS_IS_OK(status)) {
3894                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3895                           "cli_rpc_pipe_bind failed with error %s\n",
3896                           nt_errstr(status) ));
3897                 TALLOC_FREE(result);
3898                 return status;
3899         }
3900
3901         /*
3902          * The credentials on a new netlogon pipe are the ones we are passed
3903          * in - copy them over.
3904          */
3905         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3906         if (result->dc == NULL) {
3907                 DEBUG(0, ("talloc failed\n"));
3908                 TALLOC_FREE(result);
3909                 return NT_STATUS_NO_MEMORY;
3910         }
3911
3912         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3913                   "for domain %s and bound using schannel.\n",
3914                   get_pipe_name_from_iface(interface),
3915                   cli->desthost, domain ));
3916
3917         *presult = result;
3918         return NT_STATUS_OK;
3919 }
3920
3921 /****************************************************************************
3922  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3923  Fetch the session key ourselves using a temporary netlogon pipe. This
3924  version uses an NTLMSSP-authenticated netlogon pipe to get the key.
3925  ****************************************************************************/
3926
3927 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3928                                                       const char *domain,
3929                                                       const char *username,
3930                                                       const char *password,
3931                                                       uint32 *pneg_flags,
3932                                                       struct rpc_pipe_client **presult)
3933 {
3934         struct rpc_pipe_client *netlogon_pipe = NULL;
3935         NTSTATUS status;
3936
3937         status = cli_rpc_pipe_open_spnego_ntlmssp(
3938                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3939                 domain, username, password, &netlogon_pipe);
3940         if (!NT_STATUS_IS_OK(status)) {
3941                 return status;
3942         }
3943
3944         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3945                                                  pneg_flags);
3946         if (!NT_STATUS_IS_OK(status)) {
3947                 TALLOC_FREE(netlogon_pipe);
3948                 return status;
3949         }
3950
3951         *presult = netlogon_pipe;
3952         return NT_STATUS_OK;
3953 }
3954
3955 /****************************************************************************
3956  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3957  Fetch the session key ourselves using a temporary netlogon pipe. This version
3958  uses a SPNEGO NTLMSSP bind to get the session key.
3959  ****************************************************************************/
3960
3961 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3962                                                  const struct ndr_syntax_id *interface,
3963                                                  enum pipe_auth_level auth_level,
3964                                                  const char *domain,
3965                                                  const char *username,
3966                                                  const char *password,
3967                                                  struct rpc_pipe_client **presult)
3968 {
3969         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3970         struct rpc_pipe_client *netlogon_pipe = NULL;
3971         struct rpc_pipe_client *result = NULL;
3972         NTSTATUS status;
3973
3974         status = get_schannel_session_key_auth_ntlmssp(
3975                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3976         if (!NT_STATUS_IS_OK(status)) {
3977                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3978                         "key from server %s for domain %s.\n",
3979                         cli->desthost, domain ));
3980                 return status;
3981         }
3982
3983         status = cli_rpc_pipe_open_schannel_with_key(
3984                 cli, interface, auth_level, domain, netlogon_pipe->dc,
3985                 &result);
3986
3987         /* Now that we've bound using the session key we can close the netlogon pipe. */
3988         TALLOC_FREE(netlogon_pipe);
3989
3990         if (NT_STATUS_IS_OK(status)) {
3991                 *presult = result;
3992         }
3993         return status;
3994 }
3995
3996 /****************************************************************************
3997  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3998  Fetch the session key ourselves using a temporary netlogon pipe.
3999  ****************************************************************************/
4000
4001 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4002                                     const struct ndr_syntax_id *interface,
4003                                     enum pipe_auth_level auth_level,
4004                                     const char *domain,
4005                                     struct rpc_pipe_client **presult)
4006 {
4007         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4008         struct rpc_pipe_client *netlogon_pipe = NULL;
4009         struct rpc_pipe_client *result = NULL;
4010         NTSTATUS status;
4011
4012         status = get_schannel_session_key(cli, domain, &neg_flags,
4013                                           &netlogon_pipe);
4014         if (!NT_STATUS_IS_OK(status)) {
4015                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4016                         "key from server %s for domain %s.\n",
4017                         cli->desthost, domain ));
4018                 return status;
4019         }
4020
4021         status = cli_rpc_pipe_open_schannel_with_key(
4022                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4023                 &result);
4024
4025         /* Now that we've bound using the session key, we can close the netlogon pipe. */
4026         TALLOC_FREE(netlogon_pipe);
4027
4028         if (NT_STATUS_IS_OK(status)) {
4029                 *presult = result;
4030         }
4031
4032         return status;
4033 }
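/*
 * Illustrative usage sketch only, not compiled: opening the SAMR pipe with a
 * schannel bind, letting cli_rpc_pipe_open_schannel fetch the session key
 * itself via a temporary netlogon pipe. The example_* name is made up for
 * illustration, and the domain is assumed to be the one the machine trust
 * account belongs to; nothing in this file calls this function.
 */
#if 0
static NTSTATUS example_open_samr_over_schannel(struct cli_state *cli,
                                                const char *domain,
                                                struct rpc_pipe_client **presult)
{
        return cli_rpc_pipe_open_schannel(cli, &ndr_table_samr.syntax_id,
                                          PIPE_AUTH_LEVEL_PRIVACY, domain,
                                          presult);
}
#endif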
4034
4035 /****************************************************************************
4036  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4037  The idea is that this can be called with service_princ, username and password
4038  all NULL, so long as the caller already holds a valid TGT.
4039  ****************************************************************************/
4040
4041 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4042                                 const struct ndr_syntax_id *interface,
4043                                 enum pipe_auth_level auth_level,
4044                                 const char *service_princ,
4045                                 const char *username,
4046                                 const char *password,
4047                                 struct rpc_pipe_client **presult)
4048 {
4049 #ifdef HAVE_KRB5
4050         struct rpc_pipe_client *result;
4051         struct cli_pipe_auth_data *auth;
4052         NTSTATUS status;
4053
4054         status = cli_rpc_pipe_open(cli, interface, &result);
4055         if (!NT_STATUS_IS_OK(status)) {
4056                 return status;
4057         }
4058
4059         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4060                                            username, password, &auth);
4061         if (!NT_STATUS_IS_OK(status)) {
4062                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4063                           nt_errstr(status)));
4064                 TALLOC_FREE(result);
4065                 return status;
4066         }
4067
4068         status = rpc_pipe_bind(result, auth);
4069         if (!NT_STATUS_IS_OK(status)) {
4070                 DEBUG(0, ("cli_rpc_pipe_open_krb5: rpc_pipe_bind failed "
4071                           "with error %s\n", nt_errstr(status)));
4072                 TALLOC_FREE(result);
4073                 return status;
4074         }
4075
4076         *presult = result;
4077         return NT_STATUS_OK;
4078 #else
4079         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos support not compiled in.\n"));
4080         return NT_STATUS_NOT_IMPLEMENTED;
4081 #endif
4082 }
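/*
 * Illustrative usage sketch only, not compiled: binding the lsarpc pipe with
 * a krb5 bind while relying entirely on an existing TGT in the credential
 * cache, i.e. passing NULL for the service principal, username and password
 * as described in the comment above cli_rpc_pipe_open_krb5. The example_*
 * name is made up for illustration; nothing in this file calls this function.
 */
#if 0
static NTSTATUS example_open_lsarpc_with_existing_tgt(struct cli_state *cli,
                                                      struct rpc_pipe_client **presult)
{
        return cli_rpc_pipe_open_krb5(cli, &ndr_table_lsarpc.syntax_id,
                                      PIPE_AUTH_LEVEL_PRIVACY,
                                      NULL,     /* service_princ */
                                      NULL,     /* username */
                                      NULL,     /* password */
                                      presult);
}
#endif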
4083
4084 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4085                              struct rpc_pipe_client *cli,
4086                              DATA_BLOB *session_key)
4087 {
4088         if (!session_key || !cli) {
4089                 return NT_STATUS_INVALID_PARAMETER;
4090         }
4091
4092         if (!cli->auth) {
4093                 return NT_STATUS_INVALID_PARAMETER;
4094         }
4095
4096         switch (cli->auth->auth_type) {
4097                 case PIPE_AUTH_TYPE_SCHANNEL:
4098                         *session_key = data_blob_talloc(mem_ctx,
4099                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4100                         break;
4101                 case PIPE_AUTH_TYPE_NTLMSSP:
4102                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4103                         *session_key = data_blob_talloc(mem_ctx,
4104                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4105                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4106                         break;
4107                 case PIPE_AUTH_TYPE_KRB5:
4108                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4109                         *session_key = data_blob_talloc(mem_ctx,
4110                                 cli->auth->a_u.kerberos_auth->session_key.data,
4111                                 cli->auth->a_u.kerberos_auth->session_key.length);
4112                         break;
4113                 case PIPE_AUTH_TYPE_NONE:
4114                         *session_key = data_blob_talloc(mem_ctx,
4115                                 cli->auth->user_session_key.data,
4116                                 cli->auth->user_session_key.length);
4117                         break;
4118                 default:
4119                         return NT_STATUS_NO_USER_SESSION_KEY;
4120         }
4121
4122         return NT_STATUS_OK;
4123 }
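/*
 * Illustrative usage sketch only, not compiled: pulling the session key out
 * of an already-bound pipe (for instance before encrypting a password blob
 * for a SAMR call) and releasing it again. The example_* name is made up for
 * illustration; the talloc context and the bound rpc_pipe_client are assumed
 * to come from the caller.
 */
#if 0
static NTSTATUS example_use_session_key(TALLOC_CTX *mem_ctx,
                                        struct rpc_pipe_client *pipe_hnd)
{
        DATA_BLOB session_key;
        NTSTATUS status;

        status = cli_get_session_key(mem_ctx, pipe_hnd, &session_key);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        /* ... use session_key.data / session_key.length here ... */

        data_blob_free(&session_key);
        return NT_STATUS_OK;
}
#endif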