s3-rpc_client: add enum dcerpc_transport_t to rpc_cli_transport struct.
source3/rpc_client/cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "../libcli/auth/libcli_auth.h"
22 #include "librpc/gen_ndr/cli_epmapper.h"
23 #include "../librpc/gen_ndr/ndr_schannel.h"
24
25 #undef DBGC_CLASS
26 #define DBGC_CLASS DBGC_RPC_CLI
27
28 /*******************************************************************
29 interface/version dce/rpc pipe identification
30 ********************************************************************/
31
32 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
33 #define PIPE_SAMR     "\\PIPE\\samr"
34 #define PIPE_WINREG   "\\PIPE\\winreg"
35 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
36 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
37 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
38 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
39 #define PIPE_LSASS    "\\PIPE\\lsass"
40 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
41 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
42 #define PIPE_NETDFS   "\\PIPE\\netdfs"
43 #define PIPE_ECHO     "\\PIPE\\rpcecho"
44 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
45 #define PIPE_EPM      "\\PIPE\\epmapper"
46 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
47 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
48 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
49 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
50
51 /*
52  * IMPORTANT!!  If you update this structure, make sure to
53  * update the index #defines in smb.h.
54  */
55
56 static const struct pipe_id_info {
57         /* the names appear not to matter: the syntaxes _do_ matter */
58
59         const char *client_pipe;
60         const struct ndr_syntax_id *abstr_syntax; /* this one is the abstract syntax id */
61 } pipe_names [] =
62 {
63         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
64         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
65         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
66         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
67         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
68         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
69         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
70         { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
71         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
72         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
73         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
74         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
75         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
76         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
77         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
78         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
79         { NULL, NULL }
80 };
81
82 /****************************************************************************
83  Return the pipe name from the interface.
84  ****************************************************************************/
85
86 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
87 {
88         char *guid_str;
89         const char *result;
90         int i;
91         for (i = 0; pipe_names[i].client_pipe; i++) {
92                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
93                                         interface)) {
94                         return &pipe_names[i].client_pipe[5];
95                 }
96         }
97
98         /*
99          * Here we should ask \\epmapper, but for now our code is only
100          * interested in the known pipes mentioned in pipe_names[]
101          */
102
103         guid_str = GUID_string(talloc_tos(), &interface->uuid);
104         if (guid_str == NULL) {
105                 return NULL;
106         }
107         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
108                                  (int)interface->if_version);
109         TALLOC_FREE(guid_str);
110
111         if (result == NULL) {
112                 return "PIPE";
113         }
114         return result;
115 }
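/*
 * Usage sketch (illustrative, not part of the original code): for an
 * interface listed in pipe_names[] this returns the static pipe name
 * with the leading "\\PIPE" portion stripped; for anything else it
 * returns a string of the form "Interface <uuid>.<version>" talloc'ed
 * on talloc_tos(), or "PIPE" if that allocation fails.
 *
 *     const char *name =
 *             get_pipe_name_from_iface(&ndr_table_samr.syntax_id);
 *     DEBUG(10, ("resolved pipe name: %s\n", name));
 */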
116
117 /********************************************************************
118  Map internal value to wire value.
119  ********************************************************************/
120
121 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
122 {
123         switch (auth_type) {
124
125         case PIPE_AUTH_TYPE_NONE:
126                 return RPC_ANONYMOUS_AUTH_TYPE;
127
128         case PIPE_AUTH_TYPE_NTLMSSP:
129                 return RPC_NTLMSSP_AUTH_TYPE;
130
131         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
132         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
133                 return RPC_SPNEGO_AUTH_TYPE;
134
135         case PIPE_AUTH_TYPE_SCHANNEL:
136                 return RPC_SCHANNEL_AUTH_TYPE;
137
138         case PIPE_AUTH_TYPE_KRB5:
139                 return RPC_KRB5_AUTH_TYPE;
140
141         default:
142                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
143                         "auth type %u\n",
144                         (unsigned int)auth_type ));
145                 break;
146         }
147         return -1;
148 }
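/*
 * A return value of -1 means the auth type has no wire representation.
 * Callers are expected to catch it before marshalling, e.g. (sketch,
 * error code chosen for illustration only):
 *
 *     int wire_auth_type = map_pipe_auth_type_to_rpc_auth_type(auth_type);
 *     if (wire_auth_type == -1) {
 *             return NT_STATUS_INVALID_INFO_CLASS;
 *     }
 */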
149
150 /********************************************************************
151  Pipe description for a DEBUG
152  ********************************************************************/
153 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
154                                    struct rpc_pipe_client *cli)
155 {
156         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
157         if (result == NULL) {
158                 return "pipe";
159         }
160         return result;
161 }
162
163 /********************************************************************
164  Rpc pipe call id.
165  ********************************************************************/
166
167 static uint32 get_rpc_call_id(void)
168 {
169         static uint32 call_id = 0;
170         return ++call_id;
171 }
172
173 /*
174  * Realloc pdu to have at least "size" bytes
175  */
176
177 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
178 {
179         size_t extra_size;
180
181         if (prs_data_size(pdu) >= size) {
182                 return true;
183         }
184
185         extra_size = size - prs_data_size(pdu);
186
187         if (!prs_force_grow(pdu, extra_size)) {
188                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
189                           "%d bytes.\n", (int)extra_size));
190                 return false;
191         }
192
193         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
194                   (int)extra_size, prs_data_size(pdu)));
195         return true;
196 }
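/*
 * Note: rpc_grow_buffer only ever grows the parse struct. If the
 * buffer already holds at least "size" bytes it is left untouched and
 * the call succeeds immediately.
 */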
197
198
199 /*******************************************************************
200  Use SMBreadX to get the rest of one fragment's worth of rpc data.
201  Reads the whole size or gives an error message.
202  ********************************************************************/
203
204 struct rpc_read_state {
205         struct event_context *ev;
206         struct rpc_cli_transport *transport;
207         uint8_t *data;
208         size_t size;
209         size_t num_read;
210 };
211
212 static void rpc_read_done(struct tevent_req *subreq);
213
214 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
215                                         struct event_context *ev,
216                                         struct rpc_cli_transport *transport,
217                                         uint8_t *data, size_t size)
218 {
219         struct tevent_req *req, *subreq;
220         struct rpc_read_state *state;
221
222         req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
223         if (req == NULL) {
224                 return NULL;
225         }
226         state->ev = ev;
227         state->transport = transport;
228         state->data = data;
229         state->size = size;
230         state->num_read = 0;
231
232         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
233
234         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
235                                       transport->priv);
236         if (subreq == NULL) {
237                 goto fail;
238         }
239         tevent_req_set_callback(subreq, rpc_read_done, req);
240         return req;
241
242  fail:
243         TALLOC_FREE(req);
244         return NULL;
245 }
246
247 static void rpc_read_done(struct tevent_req *subreq)
248 {
249         struct tevent_req *req = tevent_req_callback_data(
250                 subreq, struct tevent_req);
251         struct rpc_read_state *state = tevent_req_data(
252                 req, struct rpc_read_state);
253         NTSTATUS status;
254         ssize_t received;
255
256         status = state->transport->read_recv(subreq, &received);
257         TALLOC_FREE(subreq);
258         if (!NT_STATUS_IS_OK(status)) {
259                 tevent_req_nterror(req, status);
260                 return;
261         }
262
263         state->num_read += received;
264         if (state->num_read == state->size) {
265                 tevent_req_done(req);
266                 return;
267         }
268
269         subreq = state->transport->read_send(state, state->ev,
270                                              state->data + state->num_read,
271                                              state->size - state->num_read,
272                                              state->transport->priv);
273         if (tevent_req_nomem(subreq, req)) {
274                 return;
275         }
276         tevent_req_set_callback(subreq, rpc_read_done, req);
277 }
278
279 static NTSTATUS rpc_read_recv(struct tevent_req *req)
280 {
281         return tevent_req_simple_recv_ntstatus(req);
282 }
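/*
 * Typical caller pattern for the *_send/*_recv pairs in this file
 * (sketch only; "my_continue" is a made-up callback name):
 *
 *     subreq = rpc_read_send(state, ev, cli->transport, buf, len);
 *     if (tevent_req_nomem(subreq, req)) {
 *             return;
 *     }
 *     tevent_req_set_callback(subreq, my_continue, req);
 *
 *     static void my_continue(struct tevent_req *subreq)
 *     {
 *             struct tevent_req *req = tevent_req_callback_data(
 *                     subreq, struct tevent_req);
 *             NTSTATUS status = rpc_read_recv(subreq);
 *             TALLOC_FREE(subreq);
 *             if (!NT_STATUS_IS_OK(status)) {
 *                     tevent_req_nterror(req, status);
 *                     return;
 *             }
 *             tevent_req_done(req);
 *     }
 */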
283
284 struct rpc_write_state {
285         struct event_context *ev;
286         struct rpc_cli_transport *transport;
287         const uint8_t *data;
288         size_t size;
289         size_t num_written;
290 };
291
292 static void rpc_write_done(struct tevent_req *subreq);
293
294 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
295                                          struct event_context *ev,
296                                          struct rpc_cli_transport *transport,
297                                          const uint8_t *data, size_t size)
298 {
299         struct tevent_req *req, *subreq;
300         struct rpc_write_state *state;
301
302         req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
303         if (req == NULL) {
304                 return NULL;
305         }
306         state->ev = ev;
307         state->transport = transport;
308         state->data = data;
309         state->size = size;
310         state->num_written = 0;
311
312         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
313
314         subreq = transport->write_send(state, ev, data, size, transport->priv);
315         if (subreq == NULL) {
316                 goto fail;
317         }
318         tevent_req_set_callback(subreq, rpc_write_done, req);
319         return req;
320  fail:
321         TALLOC_FREE(req);
322         return NULL;
323 }
324
325 static void rpc_write_done(struct tevent_req *subreq)
326 {
327         struct tevent_req *req = tevent_req_callback_data(
328                 subreq, struct tevent_req);
329         struct rpc_write_state *state = tevent_req_data(
330                 req, struct rpc_write_state);
331         NTSTATUS status;
332         ssize_t written;
333
334         status = state->transport->write_recv(subreq, &written);
335         TALLOC_FREE(subreq);
336         if (!NT_STATUS_IS_OK(status)) {
337                 tevent_req_nterror(req, status);
338                 return;
339         }
340
341         state->num_written += written;
342
343         if (state->num_written == state->size) {
344                 tevent_req_done(req);
345                 return;
346         }
347
348         subreq = state->transport->write_send(state, state->ev,
349                                               state->data + state->num_written,
350                                               state->size - state->num_written,
351                                               state->transport->priv);
352         if (tevent_req_nomem(subreq, req)) {
353                 return;
354         }
355         tevent_req_set_callback(subreq, rpc_write_done, req);
356 }
357
358 static NTSTATUS rpc_write_recv(struct tevent_req *req)
359 {
360         return tevent_req_simple_recv_ntstatus(req);
361 }
362
363
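/*
 * parse_rpc_header unmarshalls the common DCE/RPC header (RPC_HEADER_LEN
 * bytes: version, packet type, flags, data representation, frag_len,
 * auth_len and call_id) from the PDU and sanity-checks frag_len against
 * cli->max_recv_frag, the largest fragment we are prepared to accept.
 */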
364 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
365                                  struct rpc_hdr_info *prhdr,
366                                  prs_struct *pdu)
367 {
368         /*
369          * This next call sets the endian bit correctly in current_pdu. We
370          * will propagate this to rbuf later.
371          */
372
373         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
374                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
375                 return NT_STATUS_BUFFER_TOO_SMALL;
376         }
377
378         if (prhdr->frag_len > cli->max_recv_frag) {
379                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
380                           " we only allow %d\n", (int)prhdr->frag_len,
381                           (int)cli->max_recv_frag));
382                 return NT_STATUS_BUFFER_TOO_SMALL;
383         }
384
385         return NT_STATUS_OK;
386 }
387
388 /****************************************************************************
389  Try and get a PDU's worth of data from current_pdu. If not, then read more
390  from the wire.
391  ****************************************************************************/
392
393 struct get_complete_frag_state {
394         struct event_context *ev;
395         struct rpc_pipe_client *cli;
396         struct rpc_hdr_info *prhdr;
397         prs_struct *pdu;
398 };
399
400 static void get_complete_frag_got_header(struct tevent_req *subreq);
401 static void get_complete_frag_got_rest(struct tevent_req *subreq);
402
403 static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
404                                                  struct event_context *ev,
405                                                  struct rpc_pipe_client *cli,
406                                                  struct rpc_hdr_info *prhdr,
407                                                  prs_struct *pdu)
408 {
409         struct tevent_req *req, *subreq;
410         struct get_complete_frag_state *state;
411         uint32_t pdu_len;
412         NTSTATUS status;
413
414         req = tevent_req_create(mem_ctx, &state,
415                                 struct get_complete_frag_state);
416         if (req == NULL) {
417                 return NULL;
418         }
419         state->ev = ev;
420         state->cli = cli;
421         state->prhdr = prhdr;
422         state->pdu = pdu;
423
424         pdu_len = prs_data_size(pdu);
425         if (pdu_len < RPC_HEADER_LEN) {
426                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
427                         status = NT_STATUS_NO_MEMORY;
428                         goto post_status;
429                 }
430                 subreq = rpc_read_send(
431                         state, state->ev,
432                         state->cli->transport,
433                         (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
434                         RPC_HEADER_LEN - pdu_len);
435                 if (subreq == NULL) {
436                         status = NT_STATUS_NO_MEMORY;
437                         goto post_status;
438                 }
439                 tevent_req_set_callback(subreq, get_complete_frag_got_header,
440                                         req);
441                 return req;
442         }
443
444         status = parse_rpc_header(cli, prhdr, pdu);
445         if (!NT_STATUS_IS_OK(status)) {
446                 goto post_status;
447         }
448
449         /*
450          * Ensure we have frag_len bytes of data.
451          */
452         if (pdu_len < prhdr->frag_len) {
453                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
454                         status = NT_STATUS_NO_MEMORY;
455                         goto post_status;
456                 }
457                 subreq = rpc_read_send(state, state->ev,
458                                        state->cli->transport,
459                                        (uint8_t *)(prs_data_p(pdu) + pdu_len),
460                                        prhdr->frag_len - pdu_len);
461                 if (subreq == NULL) {
462                         status = NT_STATUS_NO_MEMORY;
463                         goto post_status;
464                 }
465                 tevent_req_set_callback(subreq, get_complete_frag_got_rest,
466                                         req);
467                 return req;
468         }
469
470         status = NT_STATUS_OK;
471  post_status:
472         if (NT_STATUS_IS_OK(status)) {
473                 tevent_req_done(req);
474         } else {
475                 tevent_req_nterror(req, status);
476         }
477         return tevent_req_post(req, ev);
478 }
479
480 static void get_complete_frag_got_header(struct tevent_req *subreq)
481 {
482         struct tevent_req *req = tevent_req_callback_data(
483                 subreq, struct tevent_req);
484         struct get_complete_frag_state *state = tevent_req_data(
485                 req, struct get_complete_frag_state);
486         NTSTATUS status;
487
488         status = rpc_read_recv(subreq);
489         TALLOC_FREE(subreq);
490         if (!NT_STATUS_IS_OK(status)) {
491                 tevent_req_nterror(req, status);
492                 return;
493         }
494
495         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
496         if (!NT_STATUS_IS_OK(status)) {
497                 tevent_req_nterror(req, status);
498                 return;
499         }
500
501         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
502                 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
503                 return;
504         }
505
506         /*
507          * We're here in this piece of code because we've read exactly
508          * RPC_HEADER_LEN bytes into state->pdu.
509          */
510
511         subreq = rpc_read_send(
512                 state, state->ev, state->cli->transport,
513                 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
514                 state->prhdr->frag_len - RPC_HEADER_LEN);
515         if (tevent_req_nomem(subreq, req)) {
516                 return;
517         }
518         tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
519 }
520
521 static void get_complete_frag_got_rest(struct tevent_req *subreq)
522 {
523         struct tevent_req *req = tevent_req_callback_data(
524                 subreq, struct tevent_req);
525         NTSTATUS status;
526
527         status = rpc_read_recv(subreq);
528         TALLOC_FREE(subreq);
529         if (!NT_STATUS_IS_OK(status)) {
530                 tevent_req_nterror(req, status);
531                 return;
532         }
533         tevent_req_done(req);
534 }
535
536 static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
537 {
538         return tevent_req_simple_recv_ntstatus(req);
539 }
540
541 /****************************************************************************
542  NTLMSSP specific sign/seal.
543  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
544  In fact I should probably abstract these into identical pieces of code... JRA.
545  ****************************************************************************/
546
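/*
 * The fragment verified here is laid out as
 *
 *   RPC header | response header | data | auth header | auth blob
 *
 * so the signed/sealed payload is
 *
 *   data_len = frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN
 *                       - RPC_HDR_AUTH_LEN - auth_len;
 *
 * while NTLMSSP is additionally handed the whole packet minus the auth
 * blob, because NTLMv2 signs the headers as well.
 */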
547 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
548                                 prs_struct *current_pdu,
549                                 uint8 *p_ss_padding_len)
550 {
551         RPC_HDR_AUTH auth_info;
552         uint32 save_offset = prs_offset(current_pdu);
553         uint32 auth_len = prhdr->auth_len;
554         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
555         unsigned char *data = NULL;
556         size_t data_len;
557         unsigned char *full_packet_data = NULL;
558         size_t full_packet_data_len;
559         DATA_BLOB auth_blob;
560         NTSTATUS status;
561
562         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
563             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
564                 return NT_STATUS_OK;
565         }
566
567         if (!ntlmssp_state) {
568                 return NT_STATUS_INVALID_PARAMETER;
569         }
570
571         /* Ensure there's enough data for an authenticated response. */
572         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
573                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
574                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
575                         (unsigned int)auth_len ));
576                 return NT_STATUS_BUFFER_TOO_SMALL;
577         }
578
579         /*
580          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
581          * after the RPC header.
582          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
583          * functions as NTLMv2 checks the rpc headers also.
584          */
585
586         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
587         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
588
589         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
590         full_packet_data_len = prhdr->frag_len - auth_len;
591
592         /* Pull the auth header and the following data into a blob. */
593         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
594                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
595                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
596                 return NT_STATUS_BUFFER_TOO_SMALL;
597         }
598
599         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
600                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
601                 return NT_STATUS_BUFFER_TOO_SMALL;
602         }
603
604         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
605         auth_blob.length = auth_len;
606
607         switch (cli->auth->auth_level) {
608                 case PIPE_AUTH_LEVEL_PRIVACY:
609                         /* Data is encrypted. */
610                         status = ntlmssp_unseal_packet(ntlmssp_state,
611                                                         data, data_len,
612                                                         full_packet_data,
613                                                         full_packet_data_len,
614                                                         &auth_blob);
615                         if (!NT_STATUS_IS_OK(status)) {
616                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
617                                         "packet from %s. Error was %s.\n",
618                                         rpccli_pipe_txt(debug_ctx(), cli),
619                                         nt_errstr(status) ));
620                                 return status;
621                         }
622                         break;
623                 case PIPE_AUTH_LEVEL_INTEGRITY:
624                         /* Data is signed. */
625                         status = ntlmssp_check_packet(ntlmssp_state,
626                                                         data, data_len,
627                                                         full_packet_data,
628                                                         full_packet_data_len,
629                                                         &auth_blob);
630                         if (!NT_STATUS_IS_OK(status)) {
631                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
632                                         "packet from %s. Error was %s.\n",
633                                         rpccli_pipe_txt(debug_ctx(), cli),
634                                         nt_errstr(status) ));
635                                 return status;
636                         }
637                         break;
638                 default:
639                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
640                                   "auth level %d\n", cli->auth->auth_level));
641                         return NT_STATUS_INVALID_INFO_CLASS;
642         }
643
644         /*
645          * Return the current pointer to the data offset.
646          */
647
648         if(!prs_set_offset(current_pdu, save_offset)) {
649                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
650                         (unsigned int)save_offset ));
651                 return NT_STATUS_BUFFER_TOO_SMALL;
652         }
653
654         /*
655          * Remember the padding length. We must remove it from the real data
656          * stream once the sign/seal is done.
657          */
658
659         *p_ss_padding_len = auth_info.auth_pad_len;
660
661         return NT_STATUS_OK;
662 }
663
664 /****************************************************************************
665  schannel specific sign/seal.
666  ****************************************************************************/
667
668 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
669                                 prs_struct *current_pdu,
670                                 uint8 *p_ss_padding_len)
671 {
672         RPC_HDR_AUTH auth_info;
673         RPC_AUTH_SCHANNEL_CHK schannel_chk;
674         uint32 auth_len = prhdr->auth_len;
675         uint32 save_offset = prs_offset(current_pdu);
676         struct schannel_auth_struct *schannel_auth =
677                 cli->auth->a_u.schannel_auth;
678         uint32 data_len;
679
680         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
681             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
682                 return NT_STATUS_OK;
683         }
684
685         if (auth_len < RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
686                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
687                 return NT_STATUS_INVALID_PARAMETER;
688         }
689
690         if (!schannel_auth) {
691                 return NT_STATUS_INVALID_PARAMETER;
692         }
693
694         /* Ensure there's enough data for an authenticated response. */
695         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
696                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
697                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
698                         (unsigned int)auth_len ));
699                 return NT_STATUS_INVALID_PARAMETER;
700         }
701
702         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
703
704         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
705                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
706                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
707                 return NT_STATUS_BUFFER_TOO_SMALL;
708         }
709
710         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
711                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
712                 return NT_STATUS_BUFFER_TOO_SMALL;
713         }
714
715         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
716                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
717                         auth_info.auth_type));
718                 return NT_STATUS_BUFFER_TOO_SMALL;
719         }
720
721         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
722                                 &schannel_chk, current_pdu, 0)) {
723                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
724                 return NT_STATUS_BUFFER_TOO_SMALL;
725         }
726
727         if (!schannel_decode(schannel_auth,
728                         cli->auth->auth_level,
729                         SENDER_IS_ACCEPTOR,
730                         &schannel_chk,
731                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
732                         data_len)) {
733                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
734                                 "Connection to %s.\n",
735                                 rpccli_pipe_txt(debug_ctx(), cli)));
736                 return NT_STATUS_INVALID_PARAMETER;
737         }
738
739         /* The sequence number gets incremented on both send and receive. */
740         schannel_auth->seq_num++;
741
742         /*
743          * Return the current pointer to the data offset.
744          */
745
746         if(!prs_set_offset(current_pdu, save_offset)) {
747                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
748                         (unsigned int)save_offset ));
749                 return NT_STATUS_BUFFER_TOO_SMALL;
750         }
751
752         /*
753          * Remember the padding length. We must remove it from the real data
754          * stream once the sign/seal is done.
755          */
756
757         *p_ss_padding_len = auth_info.auth_pad_len;
758
759         return NT_STATUS_OK;
760 }
761
762 /****************************************************************************
763  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
764  ****************************************************************************/
765
766 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
767                                 prs_struct *current_pdu,
768                                 uint8 *p_ss_padding_len)
769 {
770         NTSTATUS ret = NT_STATUS_OK;
771
772         /* Paranoia checks for auth_len. */
773         if (prhdr->auth_len) {
774                 if (prhdr->auth_len > prhdr->frag_len) {
775                         return NT_STATUS_INVALID_PARAMETER;
776                 }
777
778                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
779                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
780                         /* Integer wrap attempt. */
781                         return NT_STATUS_INVALID_PARAMETER;
782                 }
783         }
784
785         /*
786          * Now we have a complete RPC response PDU fragment, try to verify any auth data.
787          */
788
789         switch(cli->auth->auth_type) {
790                 case PIPE_AUTH_TYPE_NONE:
791                         if (prhdr->auth_len) {
792                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
793                                           "Connection to %s - got non-zero "
794                                           "auth len %u.\n",
795                                         rpccli_pipe_txt(debug_ctx(), cli),
796                                         (unsigned int)prhdr->auth_len ));
797                                 return NT_STATUS_INVALID_PARAMETER;
798                         }
799                         break;
800
801                 case PIPE_AUTH_TYPE_NTLMSSP:
802                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
803                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
804                         if (!NT_STATUS_IS_OK(ret)) {
805                                 return ret;
806                         }
807                         break;
808
809                 case PIPE_AUTH_TYPE_SCHANNEL:
810                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
811                         if (!NT_STATUS_IS_OK(ret)) {
812                                 return ret;
813                         }
814                         break;
815
816                 case PIPE_AUTH_TYPE_KRB5:
817                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
818                 default:
819                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
820                                   "to %s - unknown internal auth type %u.\n",
821                                   rpccli_pipe_txt(debug_ctx(), cli),
822                                   cli->auth->auth_type ));
823                         return NT_STATUS_INVALID_INFO_CLASS;
824         }
825
826         return NT_STATUS_OK;
827 }
828
829 /****************************************************************************
830  Do basic authentication checks on an incoming pdu.
831  ****************************************************************************/
832
833 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
834                         prs_struct *current_pdu,
835                         uint8 expected_pkt_type,
836                         char **ppdata,
837                         uint32 *pdata_len,
838                         prs_struct *return_data)
839 {
840
841         NTSTATUS ret = NT_STATUS_OK;
842         uint32 current_pdu_len = prs_data_size(current_pdu);
843
844         if (current_pdu_len != prhdr->frag_len) {
845                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
846                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
847                 return NT_STATUS_INVALID_PARAMETER;
848         }
849
850         /*
851          * Point the return values at the real data including the RPC
852          * header. Just in case the caller wants it.
853          */
854         *ppdata = prs_data_p(current_pdu);
855         *pdata_len = current_pdu_len;
856
857         /* Ensure we have the correct type. */
858         switch (prhdr->pkt_type) {
859                 case RPC_ALTCONTRESP:
860                 case RPC_BINDACK:
861
862                         /* Alter context and bind ack share the same packet definitions. */
863                         break;
864
865
866                 case RPC_RESPONSE:
867                 {
868                         RPC_HDR_RESP rhdr_resp;
869                         uint8 ss_padding_len = 0;
870
871                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
872                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
873                                 return NT_STATUS_BUFFER_TOO_SMALL;
874                         }
875
876                         /* Here's where we deal with incoming sign/seal. */
877                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
878                                         current_pdu, &ss_padding_len);
879                         if (!NT_STATUS_IS_OK(ret)) {
880                                 return ret;
881                         }
882
883                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
884                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
885
886                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
887                                 return NT_STATUS_BUFFER_TOO_SMALL;
888                         }
889
890                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
891
892                         /* Remember to remove the auth footer. */
893                         if (prhdr->auth_len) {
894                                 /* We've already done integer wrap tests on auth_len in
895                                         cli_pipe_validate_rpc_response(). */
896                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
897                                         return NT_STATUS_BUFFER_TOO_SMALL;
898                                 }
899                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
900                         }
901
902                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
903                                 current_pdu_len, *pdata_len, ss_padding_len ));
904
905                         /*
906                          * If this is the first reply, and the allocation hint is reasonable, try to
907                          * set up the return_data parse_struct to the correct size.
908                          */
909
910                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
911                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
912                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
913                                                 "too large to allocate\n",
914                                                 (unsigned int)rhdr_resp.alloc_hint ));
915                                         return NT_STATUS_NO_MEMORY;
916                                 }
917                         }
918
919                         break;
920                 }
921
922                 case RPC_BINDNACK:
923                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
924                                   "received from %s!\n",
925                                   rpccli_pipe_txt(debug_ctx(), cli)));
926                         /* Use this for now... */
927                         return NT_STATUS_NETWORK_ACCESS_DENIED;
928
929                 case RPC_FAULT:
930                 {
931                         RPC_HDR_RESP rhdr_resp;
932                         RPC_HDR_FAULT fault_resp;
933
934                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
935                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
936                                 return NT_STATUS_BUFFER_TOO_SMALL;
937                         }
938
939                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
940                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
941                                 return NT_STATUS_BUFFER_TOO_SMALL;
942                         }
943
944                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
945                                   "code %s received from %s!\n",
946                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
947                                 rpccli_pipe_txt(debug_ctx(), cli)));
948                         if (NT_STATUS_IS_OK(fault_resp.status)) {
949                                 return NT_STATUS_UNSUCCESSFUL;
950                         } else {
951                                 return fault_resp.status;
952                         }
953                 }
954
955                 default:
956                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
957                                 "from %s!\n",
958                                 (unsigned int)prhdr->pkt_type,
959                                 rpccli_pipe_txt(debug_ctx(), cli)));
960                         return NT_STATUS_INVALID_INFO_CLASS;
961         }
962
963         if (prhdr->pkt_type != expected_pkt_type) {
964                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
965                           "got an unexpected RPC packet type - %u, not %u\n",
966                         rpccli_pipe_txt(debug_ctx(), cli),
967                         prhdr->pkt_type,
968                         expected_pkt_type));
969                 return NT_STATUS_INVALID_INFO_CLASS;
970         }
971
972         /* Do this just before return - we don't want to modify any rpc header
973            data before now as we may have needed to do cryptographic actions on
974            it before. */
975
976         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
977                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
978                         "setting fragment first/last ON.\n"));
979                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
980         }
981
982         return NT_STATUS_OK;
983 }
984
985 /****************************************************************************
986  Ensure we eat the just processed pdu from the current_pdu prs_struct.
987  Normally the frag_len and buffer size will match, but on the first trans
988  reply there is a theoretical chance that buffer size > frag_len, so we must
989  deal with that.
990  ****************************************************************************/
991
992 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
993 {
994         uint32 current_pdu_len = prs_data_size(current_pdu);
995
996         if (current_pdu_len < prhdr->frag_len) {
997                 return NT_STATUS_BUFFER_TOO_SMALL;
998         }
999
1000         /* Common case. */
1001         if (current_pdu_len == (uint32)prhdr->frag_len) {
1002                 prs_mem_free(current_pdu);
1003                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1004                 /* Make current_pdu dynamic with no memory. */
1005                 prs_give_memory(current_pdu, 0, 0, True);
1006                 return NT_STATUS_OK;
1007         }
1008
1009         /*
1010          * Oh no! More data in the buffer than we processed in the current pdu.
1011          * Cheat. Move the data down and shrink the buffer.
1012          */
1013
1014         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1015                         current_pdu_len - prhdr->frag_len);
1016
1017         /* Remember to set the read offset back to zero. */
1018         prs_set_offset(current_pdu, 0);
1019
1020         /* Shrink the buffer. */
1021         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1022                 return NT_STATUS_BUFFER_TOO_SMALL;
1023         }
1024
1025         return NT_STATUS_OK;
1026 }
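/*
 * Example (sketch): with current_pdu_len == 1200 and frag_len == 1024,
 * the trailing 176 bytes belonging to the next fragment are copied to
 * the front of the buffer, the read offset is reset to zero and the
 * buffer is shrunk to 176 bytes.
 */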
1027
1028 /****************************************************************************
1029  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1030 ****************************************************************************/
1031
1032 struct cli_api_pipe_state {
1033         struct event_context *ev;
1034         struct rpc_cli_transport *transport;
1035         uint8_t *rdata;
1036         uint32_t rdata_len;
1037 };
1038
1039 static void cli_api_pipe_trans_done(struct tevent_req *subreq);
1040 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1041 static void cli_api_pipe_read_done(struct tevent_req *subreq);
1042
1043 static struct tevent_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1044                                             struct event_context *ev,
1045                                             struct rpc_cli_transport *transport,
1046                                             uint8_t *data, size_t data_len,
1047                                             uint32_t max_rdata_len)
1048 {
1049         struct tevent_req *req, *subreq;
1050         struct cli_api_pipe_state *state;
1051         NTSTATUS status;
1052
1053         req = tevent_req_create(mem_ctx, &state, struct cli_api_pipe_state);
1054         if (req == NULL) {
1055                 return NULL;
1056         }
1057         state->ev = ev;
1058         state->transport = transport;
1059
1060         if (max_rdata_len < RPC_HEADER_LEN) {
1061                 /*
1062                  * For an RPC reply we always need at least RPC_HEADER_LEN
1063                  * bytes. We check this here because we will receive
1064                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1065                  */
1066                 status = NT_STATUS_INVALID_PARAMETER;
1067                 goto post_status;
1068         }
1069
1070         if (transport->trans_send != NULL) {
1071                 subreq = transport->trans_send(state, ev, data, data_len,
1072                                                max_rdata_len, transport->priv);
1073                 if (subreq == NULL) {
1074                         goto fail;
1075                 }
1076                 tevent_req_set_callback(subreq, cli_api_pipe_trans_done, req);
1077                 return req;
1078         }
1079
1080         /*
1081          * If the transport does not provide a "trans" routine, e.g. the
1082          * ncacn_ip_tcp transport, do the write/read step here.
1083          */
1084
1085         subreq = rpc_write_send(state, ev, transport, data, data_len);
1086         if (subreq == NULL) {
1087                 goto fail;
1088         }
1089         tevent_req_set_callback(subreq, cli_api_pipe_write_done, req);
1090         return req;
1091
1092         status = NT_STATUS_INVALID_PARAMETER;
1093
1094  post_status:
1095         tevent_req_nterror(req, status);
1096         return tevent_req_post(req, ev);
1097  fail:
1098         TALLOC_FREE(req);
1099         return NULL;
1100 }
1101
1102 static void cli_api_pipe_trans_done(struct tevent_req *subreq)
1103 {
1104         struct tevent_req *req = tevent_req_callback_data(
1105                 subreq, struct tevent_req);
1106         struct cli_api_pipe_state *state = tevent_req_data(
1107                 req, struct cli_api_pipe_state);
1108         NTSTATUS status;
1109
1110         status = state->transport->trans_recv(subreq, state, &state->rdata,
1111                                               &state->rdata_len);
1112         TALLOC_FREE(subreq);
1113         if (!NT_STATUS_IS_OK(status)) {
1114                 tevent_req_nterror(req, status);
1115                 return;
1116         }
1117         tevent_req_done(req);
1118 }
1119
1120 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1121 {
1122         struct tevent_req *req = tevent_req_callback_data(
1123                 subreq, struct tevent_req);
1124         struct cli_api_pipe_state *state = tevent_req_data(
1125                 req, struct cli_api_pipe_state);
1126         NTSTATUS status;
1127
1128         status = rpc_write_recv(subreq);
1129         TALLOC_FREE(subreq);
1130         if (!NT_STATUS_IS_OK(status)) {
1131                 tevent_req_nterror(req, status);
1132                 return;
1133         }
1134
1135         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1136         if (tevent_req_nomem(state->rdata, req)) {
1137                 return;
1138         }
1139
1140         /*
1141          * We don't need to use rpc_read_send here; the upper layer will cope
1142          * with a short read. transport->trans_send could also return less
1143          * than state->max_rdata_len.
1144          */
1145         subreq = state->transport->read_send(state, state->ev, state->rdata,
1146                                              RPC_HEADER_LEN,
1147                                              state->transport->priv);
1148         if (tevent_req_nomem(subreq, req)) {
1149                 return;
1150         }
1151         tevent_req_set_callback(subreq, cli_api_pipe_read_done, req);
1152 }
1153
1154 static void cli_api_pipe_read_done(struct tevent_req *subreq)
1155 {
1156         struct tevent_req *req = tevent_req_callback_data(
1157                 subreq, struct tevent_req);
1158         struct cli_api_pipe_state *state = tevent_req_data(
1159                 req, struct cli_api_pipe_state);
1160         NTSTATUS status;
1161         ssize_t received;
1162
1163         status = state->transport->read_recv(subreq, &received);
1164         TALLOC_FREE(subreq);
1165         if (!NT_STATUS_IS_OK(status)) {
1166                 tevent_req_nterror(req, status);
1167                 return;
1168         }
1169         state->rdata_len = received;
1170         tevent_req_done(req);
1171 }
1172
1173 static NTSTATUS cli_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1174                                   uint8_t **prdata, uint32_t *prdata_len)
1175 {
1176         struct cli_api_pipe_state *state = tevent_req_data(
1177                 req, struct cli_api_pipe_state);
1178         NTSTATUS status;
1179
1180         if (tevent_req_is_nterror(req, &status)) {
1181                 return status;
1182         }
1183
1184         *prdata = talloc_move(mem_ctx, &state->rdata);
1185         *prdata_len = state->rdata_len;
1186         return NT_STATUS_OK;
1187 }
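/*
 * Summary of the two paths above: if the transport implements
 * trans_send/trans_recv (typically the SMB named-pipe transport) the
 * request/response runs as a single transaction; otherwise (e.g. a
 * plain TCP socket) the request is written with rpc_write_send and at
 * least RPC_HEADER_LEN bytes are read back, leaving the rest of the
 * fragment to be fetched later by get_complete_frag.
 */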
1188
1189 /****************************************************************************
1190  Send data on an rpc pipe via trans. The prs_struct data must be the last
1191  pdu fragment of an NDR data stream.
1192
1193  Receive response data from an rpc pipe, which may be large...
1194
1195  Read the first fragment: unfortunately have to use SMBtrans for the first
1196  bit, then SMBreadX for subsequent bits.
1197
1198  If the first fragment received wasn't also the last fragment, continue
1199  getting fragments until we _do_ receive the last fragment.
1200
1201  Request/Response PDUs look like the following...
1202
1203  |<------------------PDU len----------------------------------------------->|
1204  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1205
1206  +------------+-----------------+-------------+---------------+-------------+
1207  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1208  +------------+-----------------+-------------+---------------+-------------+
1209
1210  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1211  signing & sealing being negotiated.
1212
1213  ****************************************************************************/
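/*
 * Worked example (sketch; assumes the usual Samba sizes RPC_HEADER_LEN == 16,
 * RPC_HDR_RESP_LEN == 8 and RPC_HDR_AUTH_LEN == 8): a signed response
 * fragment with frag_len == 1024 and auth_len == 16 carries
 * 1024 - 16 - 8 - 8 - 16 == 976 bytes of NDR data.
 */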
1214
1215 struct rpc_api_pipe_state {
1216         struct event_context *ev;
1217         struct rpc_pipe_client *cli;
1218         uint8_t expected_pkt_type;
1219
1220         prs_struct incoming_frag;
1221         struct rpc_hdr_info rhdr;
1222
1223         prs_struct incoming_pdu;        /* Incoming reply */
1224         uint32_t incoming_pdu_offset;
1225 };
1226
1227 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1228 {
1229         prs_mem_free(&state->incoming_frag);
1230         prs_mem_free(&state->incoming_pdu);
1231         return 0;
1232 }
1233
1234 static void rpc_api_pipe_trans_done(struct tevent_req *subreq);
1235 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
1236
1237 static struct tevent_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1238                                             struct event_context *ev,
1239                                             struct rpc_pipe_client *cli,
1240                                             prs_struct *data, /* Outgoing PDU */
1241                                             uint8_t expected_pkt_type)
1242 {
1243         struct tevent_req *req, *subreq;
1244         struct rpc_api_pipe_state *state;
1245         uint16_t max_recv_frag;
1246         NTSTATUS status;
1247
1248         req = tevent_req_create(mem_ctx, &state, struct rpc_api_pipe_state);
1249         if (req == NULL) {
1250                 return NULL;
1251         }
1252         state->ev = ev;
1253         state->cli = cli;
1254         state->expected_pkt_type = expected_pkt_type;
1255         state->incoming_pdu_offset = 0;
1256
1257         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1258
1259         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1260         /* Make incoming_pdu dynamic with no memory. */
1261         prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1262
1263         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1264
1265         /*
1266          * Ensure we're not sending too much.
1267          */
1268         if (prs_offset(data) > cli->max_xmit_frag) {
1269                 status = NT_STATUS_INVALID_PARAMETER;
1270                 goto post_status;
1271         }
1272
1273         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1274
1275         max_recv_frag = cli->max_recv_frag;
1276
1277 #ifdef DEVELOPER
1278         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1279 #endif
1280
1281         subreq = cli_api_pipe_send(state, ev, cli->transport,
1282                                    (uint8_t *)prs_data_p(data),
1283                                    prs_offset(data), max_recv_frag);
1284         if (subreq == NULL) {
1285                 goto fail;
1286         }
1287         tevent_req_set_callback(subreq, rpc_api_pipe_trans_done, req);
1288         return req;
1289
1290  post_status:
1291         tevent_req_nterror(req, status);
1292         return tevent_req_post(req, ev);
1293  fail:
1294         TALLOC_FREE(req);
1295         return NULL;
1296 }
1297
1298 static void rpc_api_pipe_trans_done(struct tevent_req *subreq)
1299 {
1300         struct tevent_req *req = tevent_req_callback_data(
1301                 subreq, struct tevent_req);
1302         struct rpc_api_pipe_state *state = tevent_req_data(
1303                 req, struct rpc_api_pipe_state);
1304         NTSTATUS status;
1305         uint8_t *rdata = NULL;
1306         uint32_t rdata_len = 0;
1307         char *rdata_copy;
1308
1309         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1310         TALLOC_FREE(subreq);
1311         if (!NT_STATUS_IS_OK(status)) {
1312                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1313                 tevent_req_nterror(req, status);
1314                 return;
1315         }
1316
1317         if (rdata == NULL) {
1318                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1319                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1320                 tevent_req_done(req);
1321                 return;
1322         }
1323
1324         /*
1325          * Give the memory received from cli_trans as dynamic to the current
1326          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1327          * :-(
1328          */
1329         rdata_copy = (char *)memdup(rdata, rdata_len);
1330         TALLOC_FREE(rdata);
1331         if (tevent_req_nomem(rdata_copy, req)) {
1332                 return;
1333         }
1334         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1335
1336         /* Ensure we have enough data for a pdu. */
1337         subreq = get_complete_frag_send(state, state->ev, state->cli,
1338                                         &state->rhdr, &state->incoming_frag);
1339         if (tevent_req_nomem(subreq, req)) {
1340                 return;
1341         }
1342         tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
1343 }
1344
1345 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
1346 {
1347         struct tevent_req *req = tevent_req_callback_data(
1348                 subreq, struct tevent_req);
1349         struct rpc_api_pipe_state *state = tevent_req_data(
1350                 req, struct rpc_api_pipe_state);
1351         NTSTATUS status;
1352         char *rdata = NULL;
1353         uint32_t rdata_len = 0;
1354
1355         status = get_complete_frag_recv(subreq);
1356         TALLOC_FREE(subreq);
1357         if (!NT_STATUS_IS_OK(status)) {
1358                 DEBUG(5, ("get_complete_frag failed: %s\n",
1359                           nt_errstr(status)));
1360                 tevent_req_nterror(req, status);
1361                 return;
1362         }
1363
1364         status = cli_pipe_validate_current_pdu(
1365                 state->cli, &state->rhdr, &state->incoming_frag,
1366                 state->expected_pkt_type, &rdata, &rdata_len,
1367                 &state->incoming_pdu);
1368
1369         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1370                   (unsigned)prs_data_size(&state->incoming_frag),
1371                   (unsigned)state->incoming_pdu_offset,
1372                   nt_errstr(status)));
1373
1374         if (!NT_STATUS_IS_OK(status)) {
1375                 tevent_req_nterror(req, status);
1376                 return;
1377         }
1378
1379         if ((state->rhdr.flags & RPC_FLG_FIRST)
1380             && (state->rhdr.pack_type[0] == 0)) {
1381                 /*
1382                  * Set the data type correctly for big-endian data on the
1383                  * first packet.
1384                  */
1385                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1386                           "big-endian.\n",
1387                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1388                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1389         }
1390         /*
1391          * Check endianness on subsequent packets.
1392          */
1393         if (state->incoming_frag.bigendian_data
1394             != state->incoming_pdu.bigendian_data) {
1395                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1396                          "%s\n",
1397                          state->incoming_pdu.bigendian_data?"big":"little",
1398                          state->incoming_frag.bigendian_data?"big":"little"));
1399                 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1400                 return;
1401         }
1402
1403         /* Now copy the data portion out of the pdu into rbuf. */
1404         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1405                 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1406                 return;
1407         }
1408
1409         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1410                rdata, (size_t)rdata_len);
1411         state->incoming_pdu_offset += rdata_len;
1412
1413         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1414                                             &state->incoming_frag);
1415         if (!NT_STATUS_IS_OK(status)) {
1416                 tevent_req_nterror(req, status);
1417                 return;
1418         }
1419
1420         if (state->rhdr.flags & RPC_FLG_LAST) {
1421                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1422                           rpccli_pipe_txt(debug_ctx(), state->cli),
1423                           (unsigned)prs_data_size(&state->incoming_pdu)));
1424                 tevent_req_done(req);
1425                 return;
1426         }
1427
1428         subreq = get_complete_frag_send(state, state->ev, state->cli,
1429                                         &state->rhdr, &state->incoming_frag);
1430         if (tevent_req_nomem(subreq, req)) {
1431                 return;
1432         }
1433         tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
1434 }
1435
1436 static NTSTATUS rpc_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1437                                   prs_struct *reply_pdu)
1438 {
1439         struct rpc_api_pipe_state *state = tevent_req_data(
1440                 req, struct rpc_api_pipe_state);
1441         NTSTATUS status;
1442
1443         if (tevent_req_is_nterror(req, &status)) {
1444                 return status;
1445         }
1446
1447         *reply_pdu = state->incoming_pdu;
1448         reply_pdu->mem_ctx = mem_ctx;
1449
1450         /*
1451          * Prevent state->incoming_pdu from being freed in
1452          * rpc_api_pipe_state_destructor()
1453          */
1454         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1455
1456         return NT_STATUS_OK;
1457 }
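
/*
 * Illustrative sketch (disabled, not part of the original code): how a
 * caller typically drives the rpc_api_pipe_send/_recv pair above. It
 * mirrors the real call sites in rpc_api_pipe_req_send() and
 * rpc_pipe_bind_send() further down; example_pipe_send/_done are
 * hypothetical names.
 */
#if 0
static void example_pipe_done(struct tevent_req *subreq);

/* Issue one marshalled PDU and expect an RPC_RESPONSE back. */
static struct tevent_req *example_pipe_send(TALLOC_CTX *mem_ctx,
                                            struct event_context *ev,
                                            struct rpc_pipe_client *cli,
                                            prs_struct *rpc_out)
{
        struct tevent_req *subreq;

        subreq = rpc_api_pipe_send(mem_ctx, ev, cli, rpc_out, RPC_RESPONSE);
        if (subreq != NULL) {
                tevent_req_set_callback(subreq, example_pipe_done, NULL);
        }
        return subreq;
}

static void example_pipe_done(struct tevent_req *subreq)
{
        prs_struct reply_pdu;
        NTSTATUS status;

        /* Hands over the reassembled PDU; reply_pdu is caller-owned now. */
        status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                return;
        }
        /* ... unmarshall the response headers from reply_pdu here ... */
        prs_mem_free(&reply_pdu);
}
#endif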
1458
1459 /*******************************************************************
1460  Creates krb5 auth bind.
1461  ********************************************************************/
1462
1463 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1464                                                 enum pipe_auth_level auth_level,
1465                                                 RPC_HDR_AUTH *pauth_out,
1466                                                 prs_struct *auth_data)
1467 {
1468 #ifdef HAVE_KRB5
1469         int ret;
1470         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1471         DATA_BLOB tkt = data_blob_null;
1472         DATA_BLOB tkt_wrapped = data_blob_null;
1473
1474         /* We may change the pad length before marshalling. */
1475         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1476
1477         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1478                 a->service_principal ));
1479
1480         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1481
1482         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1483                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1484
1485         if (ret) {
1486                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1487                         "failed with %s\n",
1488                         a->service_principal,
1489                         error_message(ret) ));
1490
1491                 data_blob_free(&tkt);
1492                 prs_mem_free(auth_data);
1493                 return NT_STATUS_INVALID_PARAMETER;
1494         }
1495
1496         /* wrap that up in a nice GSS-API wrapping */
1497         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1498
1499         data_blob_free(&tkt);
1500
1501         /* Auth len in the rpc header doesn't include auth_header. */
1502         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1503                 data_blob_free(&tkt_wrapped);
1504                 prs_mem_free(auth_data);
1505                 return NT_STATUS_NO_MEMORY;
1506         }
1507
1508         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1509         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1510
1511         data_blob_free(&tkt_wrapped);
1512         return NT_STATUS_OK;
1513 #else
1514         return NT_STATUS_INVALID_PARAMETER;
1515 #endif
1516 }
1517
1518 /*******************************************************************
1519  Creates SPNEGO NTLMSSP auth bind.
1520  ********************************************************************/
1521
1522 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1523                                                 enum pipe_auth_level auth_level,
1524                                                 RPC_HDR_AUTH *pauth_out,
1525                                                 prs_struct *auth_data)
1526 {
1527         NTSTATUS nt_status;
1528         DATA_BLOB null_blob = data_blob_null;
1529         DATA_BLOB request = data_blob_null;
1530         DATA_BLOB spnego_msg = data_blob_null;
1531
1532         /* We may change the pad length before marshalling. */
1533         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1534
1535         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1536         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1537                                         null_blob,
1538                                         &request);
1539
1540         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1541                 data_blob_free(&request);
1542                 prs_mem_free(auth_data);
1543                 return nt_status;
1544         }
1545
1546         /* Wrap this in SPNEGO. */
1547         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1548
1549         data_blob_free(&request);
1550
1551         /* Auth len in the rpc header doesn't include auth_header. */
1552         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1553                 data_blob_free(&spnego_msg);
1554                 prs_mem_free(auth_data);
1555                 return NT_STATUS_NO_MEMORY;
1556         }
1557
1558         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1559         dump_data(5, spnego_msg.data, spnego_msg.length);
1560
1561         data_blob_free(&spnego_msg);
1562         return NT_STATUS_OK;
1563 }
1564
1565 /*******************************************************************
1566  Creates NTLMSSP auth bind.
1567  ********************************************************************/
1568
1569 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1570                                                 enum pipe_auth_level auth_level,
1571                                                 RPC_HDR_AUTH *pauth_out,
1572                                                 prs_struct *auth_data)
1573 {
1574         NTSTATUS nt_status;
1575         DATA_BLOB null_blob = data_blob_null;
1576         DATA_BLOB request = data_blob_null;
1577
1578         /* We may change the pad length before marshalling. */
1579         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1580
1581         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1582         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1583                                         null_blob,
1584                                         &request);
1585
1586         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1587                 data_blob_free(&request);
1588                 prs_mem_free(auth_data);
1589                 return nt_status;
1590         }
1591
1592         /* Auth len in the rpc header doesn't include auth_header. */
1593         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1594                 data_blob_free(&request);
1595                 prs_mem_free(auth_data);
1596                 return NT_STATUS_NO_MEMORY;
1597         }
1598
1599         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1600         dump_data(5, request.data, request.length);
1601
1602         data_blob_free(&request);
1603         return NT_STATUS_OK;
1604 }
1605
1606 /*******************************************************************
1607  Creates schannel auth bind.
1608  ********************************************************************/
1609
1610 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1611                                                 enum pipe_auth_level auth_level,
1612                                                 RPC_HDR_AUTH *pauth_out,
1613                                                 prs_struct *auth_data)
1614 {
1615         struct NL_AUTH_MESSAGE r;
1616         enum ndr_err_code ndr_err;
1617         DATA_BLOB blob;
1618
1619         /* We may change the pad length before marshalling. */
1620         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1621
1622         /* Use lp_workgroup() if domain not specified */
1623
1624         if (!cli->auth->domain || !cli->auth->domain[0]) {
1625                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1626                 if (cli->auth->domain == NULL) {
1627                         return NT_STATUS_NO_MEMORY;
1628                 }
1629         }
1630
1631         /*
1632          * Now marshall the data into the auth parse_struct.
1633          */
1634
1635         r.MessageType                   = NL_NEGOTIATE_REQUEST;
1636         r.Flags                         = NL_FLAG_OEM_NETBIOS_DOMAIN_NAME |
1637                                           NL_FLAG_OEM_NETBIOS_COMPUTER_NAME;
1638         r.oem_netbios_domain.a          = cli->auth->domain;
1639         r.oem_netbios_computer.a        = global_myname();
1640
1641         ndr_err = ndr_push_struct_blob(&blob, talloc_tos(), NULL, &r,
1642                        (ndr_push_flags_fn_t)ndr_push_NL_AUTH_MESSAGE);
1643         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1644                 DEBUG(0,("Failed to marshall NL_AUTH_MESSAGE.\n"));
1645                 prs_mem_free(auth_data);
1646                 return ndr_map_error2ntstatus(ndr_err);
1647         }
1648
1649         if (DEBUGLEVEL >= 10) {
1650                 NDR_PRINT_DEBUG(NL_AUTH_MESSAGE, &r);
1651         }
1652
1653         if (!prs_copy_data_in(auth_data, (const char *)blob.data, blob.length))
1654         {
1655                 prs_mem_free(auth_data);
1656                 return NT_STATUS_NO_MEMORY;
1657         }
1658
1659         return NT_STATUS_OK;
1660 }
1661
1662 /*******************************************************************
1663  Creates the internals of a DCE/RPC bind request or alter context PDU.
1664  ********************************************************************/
1665
1666 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1667                                                 prs_struct *rpc_out, 
1668                                                 uint32 rpc_call_id,
1669                                                 const struct ndr_syntax_id *abstract,
1670                                                 const struct ndr_syntax_id *transfer,
1671                                                 RPC_HDR_AUTH *phdr_auth,
1672                                                 prs_struct *pauth_info)
1673 {
1674         RPC_HDR hdr;
1675         RPC_HDR_RB hdr_rb;
1676         RPC_CONTEXT rpc_ctx;
1677         uint16 auth_len = prs_offset(pauth_info);
1678         uint8 ss_padding_len = 0;
1679         uint16 frag_len = 0;
1680
1681         /* create the RPC context. */
1682         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1683
1684         /* create the bind request RPC_HDR_RB */
1685         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1686
1687         /* Start building the frag length. */
1688         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1689
1690         /* Do we need to pad ? */
1691         if (auth_len) {
1692                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1693                 if (data_len % 8) {
1694                         ss_padding_len = 8 - (data_len % 8);
1695                         phdr_auth->auth_pad_len = ss_padding_len;
1696                 }
1697                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1698         }
1699
1700         /* Create the request RPC_HDR */
1701         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1702
1703         /* Marshall the RPC header */
1704         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1705                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1706                 return NT_STATUS_NO_MEMORY;
1707         }
1708
1709         /* Marshall the bind request data */
1710         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1711                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1712                 return NT_STATUS_NO_MEMORY;
1713         }
1714
1715         /*
1716          * Grow the outgoing buffer to store any auth info.
1717          */
1718
1719         if(auth_len != 0) {
1720                 if (ss_padding_len) {
1721                         char pad[8];
1722                         memset(pad, '\0', 8);
1723                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1724                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1725                                 return NT_STATUS_NO_MEMORY;
1726                         }
1727                 }
1728
1729                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1730                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1731                         return NT_STATUS_NO_MEMORY;
1732                 }
1733
1734
1735                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1736                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1737                         return NT_STATUS_NO_MEMORY;
1738                 }
1739         }
1740
1741         return NT_STATUS_OK;
1742 }
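
/*
 * For reference, the PDU assembled above has this layout when auth data
 * is present (following the frag_len arithmetic above):
 *
 *   RPC_HDR | RPC_HDR_RB (one context) | pad to 8 | RPC_HDR_AUTH | auth data
 *
 * frag_len covers the whole PDU; auth_len covers only the trailing auth
 * data, not the RPC_HDR_AUTH in front of it.
 */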
1743
1744 /*******************************************************************
1745  Creates a DCE/RPC bind request.
1746  ********************************************************************/
1747
1748 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1749                                 prs_struct *rpc_out, 
1750                                 uint32 rpc_call_id,
1751                                 const struct ndr_syntax_id *abstract,
1752                                 const struct ndr_syntax_id *transfer,
1753                                 enum pipe_auth_type auth_type,
1754                                 enum pipe_auth_level auth_level)
1755 {
1756         RPC_HDR_AUTH hdr_auth;
1757         prs_struct auth_info;
1758         NTSTATUS ret = NT_STATUS_OK;
1759
1760         ZERO_STRUCT(hdr_auth);
1761         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1762                 return NT_STATUS_NO_MEMORY;
1763
1764         switch (auth_type) {
1765                 case PIPE_AUTH_TYPE_SCHANNEL:
1766                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1767                         if (!NT_STATUS_IS_OK(ret)) {
1768                                 prs_mem_free(&auth_info);
1769                                 return ret;
1770                         }
1771                         break;
1772
1773                 case PIPE_AUTH_TYPE_NTLMSSP:
1774                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1775                         if (!NT_STATUS_IS_OK(ret)) {
1776                                 prs_mem_free(&auth_info);
1777                                 return ret;
1778                         }
1779                         break;
1780
1781                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1782                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1783                         if (!NT_STATUS_IS_OK(ret)) {
1784                                 prs_mem_free(&auth_info);
1785                                 return ret;
1786                         }
1787                         break;
1788
1789                 case PIPE_AUTH_TYPE_KRB5:
1790                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1791                         if (!NT_STATUS_IS_OK(ret)) {
1792                                 prs_mem_free(&auth_info);
1793                                 return ret;
1794                         }
1795                         break;
1796
1797                 case PIPE_AUTH_TYPE_NONE:
1798                         break;
1799
1800                 default:
1801                         /* "Can't" happen. */
1802                         return NT_STATUS_INVALID_INFO_CLASS;
1803         }
1804
1805         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1806                                                 rpc_out, 
1807                                                 rpc_call_id,
1808                                                 abstract,
1809                                                 transfer,
1810                                                 &hdr_auth,
1811                                                 &auth_info);
1812
1813         prs_mem_free(&auth_info);
1814         return ret;
1815 }
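
/*
 * Minimal usage sketch (disabled, illustrative only): this mirrors how
 * rpc_pipe_bind_send() below drives create_rpc_bind_req(). The
 * example_marshall_bind wrapper is a hypothetical name; the prs_struct
 * is owned by the caller and freed on error.
 */
#if 0
static NTSTATUS example_marshall_bind(struct rpc_pipe_client *cli,
                                      prs_struct *rpc_out)
{
        NTSTATUS status;

        prs_init_empty(rpc_out, talloc_tos(), MARSHALL);

        status = create_rpc_bind_req(cli, rpc_out,
                                     get_rpc_call_id(),
                                     &cli->abstract_syntax,
                                     &cli->transfer_syntax,
                                     cli->auth->auth_type,
                                     cli->auth->auth_level);
        if (!NT_STATUS_IS_OK(status)) {
                prs_mem_free(rpc_out);
                return status;
        }
        /* *rpc_out now holds a complete BIND PDU, ready to be handed to
         * rpc_api_pipe_send(..., RPC_BINDACK). */
        return NT_STATUS_OK;
}
#endif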
1816
1817 /*******************************************************************
1818  Create and add the NTLMSSP sign/seal auth header and data.
1819  ********************************************************************/
1820
1821 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1822                                         RPC_HDR *phdr,
1823                                         uint32 ss_padding_len,
1824                                         prs_struct *outgoing_pdu)
1825 {
1826         RPC_HDR_AUTH auth_info;
1827         NTSTATUS status;
1828         DATA_BLOB auth_blob = data_blob_null;
1829         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1830
1831         if (!cli->auth->a_u.ntlmssp_state) {
1832                 return NT_STATUS_INVALID_PARAMETER;
1833         }
1834
1835         /* Init and marshall the auth header. */
1836         init_rpc_hdr_auth(&auth_info,
1837                         map_pipe_auth_type_to_rpc_auth_type(
1838                                 cli->auth->auth_type),
1839                         cli->auth->auth_level,
1840                         ss_padding_len,
1841                         1 /* context id. */);
1842
1843         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1844                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1845                 data_blob_free(&auth_blob);
1846                 return NT_STATUS_NO_MEMORY;
1847         }
1848
1849         switch (cli->auth->auth_level) {
1850                 case PIPE_AUTH_LEVEL_PRIVACY:
1851                         /* Data portion is encrypted. */
1852                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1853                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1854                                         data_and_pad_len,
1855                                         (unsigned char *)prs_data_p(outgoing_pdu),
1856                                         (size_t)prs_offset(outgoing_pdu),
1857                                         &auth_blob);
1858                         if (!NT_STATUS_IS_OK(status)) {
1859                                 data_blob_free(&auth_blob);
1860                                 return status;
1861                         }
1862                         break;
1863
1864                 case PIPE_AUTH_LEVEL_INTEGRITY:
1865                         /* Data is signed. */
1866                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1867                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1868                                         data_and_pad_len,
1869                                         (unsigned char *)prs_data_p(outgoing_pdu),
1870                                         (size_t)prs_offset(outgoing_pdu),
1871                                         &auth_blob);
1872                         if (!NT_STATUS_IS_OK(status)) {
1873                                 data_blob_free(&auth_blob);
1874                                 return status;
1875                         }
1876                         break;
1877
1878                 default:
1879                         /* Can't happen. */
1880                         smb_panic("bad auth level");
1881                         /* Notreached. */
1882                         return NT_STATUS_INVALID_PARAMETER;
1883         }
1884
1885         /* Finally marshall the blob. */
1886
1887         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1888                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1889                         (unsigned int)NTLMSSP_SIG_SIZE));
1890                 data_blob_free(&auth_blob);
1891                 return NT_STATUS_NO_MEMORY;
1892         }
1893
1894         data_blob_free(&auth_blob);
1895         return NT_STATUS_OK;
1896 }
1897
1898 /*******************************************************************
1899  Create and add the schannel sign/seal auth header and data.
1900  ********************************************************************/
1901
1902 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1903                                         RPC_HDR *phdr,
1904                                         uint32 ss_padding_len,
1905                                         prs_struct *outgoing_pdu)
1906 {
1907         RPC_HDR_AUTH auth_info;
1908         RPC_AUTH_SCHANNEL_CHK verf;
1909         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1910         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1911         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1912
1913         if (!sas) {
1914                 return NT_STATUS_INVALID_PARAMETER;
1915         }
1916
1917         /* Init and marshall the auth header. */
1918         init_rpc_hdr_auth(&auth_info,
1919                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1920                         cli->auth->auth_level,
1921                         ss_padding_len,
1922                         1 /* context id. */);
1923
1924         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1925                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1926                 return NT_STATUS_NO_MEMORY;
1927         }
1928
1929         switch (cli->auth->auth_level) {
1930                 case PIPE_AUTH_LEVEL_PRIVACY:
1931                 case PIPE_AUTH_LEVEL_INTEGRITY:
1932                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1933                                 sas->seq_num));
1934
1935                         schannel_encode(sas,
1936                                         cli->auth->auth_level,
1937                                         SENDER_IS_INITIATOR,
1938                                         &verf,
1939                                         data_p,
1940                                         data_and_pad_len);
1941
1942                         sas->seq_num++;
1943                         break;
1944
1945                 default:
1946                         /* Can't happen. */
1947                         smb_panic("bad auth level");
1948                         /* Notreached. */
1949                         return NT_STATUS_INVALID_PARAMETER;
1950         }
1951
1952         /* Finally marshall the blob. */
1953         smb_io_rpc_auth_schannel_chk("",
1954                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1955                         &verf,
1956                         outgoing_pdu,
1957                         0);
1958
1959         return NT_STATUS_OK;
1960 }
1961
1962 /*******************************************************************
1963  Calculate how much data we're going to send in this packet, and
1964  work out any sign/seal padding length.
1965  ********************************************************************/
1966
1967 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1968                                         uint32 data_left,
1969                                         uint16 *p_frag_len,
1970                                         uint16 *p_auth_len,
1971                                         uint32 *p_ss_padding)
1972 {
1973         uint32 data_space, data_len;
1974
1975 #ifdef DEVELOPER
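        /*
         * Developer builds randomly pretend less data is left, so that
         * requests get split into extra fragments and the multi-fragment
         * request path is exercised.
         */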
1976         if ((data_left > 0) && (sys_random() % 2)) {
1977                 data_left = MAX(data_left/2, 1);
1978         }
1979 #endif
1980
1981         switch (cli->auth->auth_level) {
1982                 case PIPE_AUTH_LEVEL_NONE:
1983                 case PIPE_AUTH_LEVEL_CONNECT:
1984                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1985                         data_len = MIN(data_space, data_left);
1986                         *p_ss_padding = 0;
1987                         *p_auth_len = 0;
1988                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1989                         return data_len;
1990
1991                 case PIPE_AUTH_LEVEL_INTEGRITY:
1992                 case PIPE_AUTH_LEVEL_PRIVACY:
1993                         /* Treat the same for all authenticated rpc requests. */
1994                         switch(cli->auth->auth_type) {
1995                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1996                                 case PIPE_AUTH_TYPE_NTLMSSP:
1997                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1998                                         break;
1999                                 case PIPE_AUTH_TYPE_SCHANNEL:
2000                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2001                                         break;
2002                                 default:
2003                                         smb_panic("bad auth type");
2004                                         break;
2005                         }
2006
2007                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2008                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2009
2010                         data_len = MIN(data_space, data_left);
2011                         *p_ss_padding = 0;
2012                         if (data_len % 8) {
2013                                 *p_ss_padding = 8 - (data_len % 8);
2014                         }
2015                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2016                                         data_len + *p_ss_padding +              /* data plus padding. */
2017                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2018                         return data_len;
2019
2020                 default:
2021                         smb_panic("bad auth level");
2022                         /* Notreached. */
2023                         return 0;
2024         }
2025 }
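
/*
 * Worked example (hedged - it assumes the conventional sizes of a 16 byte
 * RPC header, 8 byte request header, 8 byte auth header and 16 byte
 * NTLMSSP signature, with a negotiated max_xmit_frag of 4280):
 *
 *   data_space = 4280 - 16 - 8 - 8 - 16 = 4232
 *
 * With 100 bytes of NDR data left: data_len = 100,
 * ss_padding = 8 - (100 % 8) = 4 and
 *
 *   frag_len = 16 + 8 + 100 + 4 + 8 + 16 = 152
 *
 * i.e. the whole request fits in one fragment (FIRST and LAST both set).
 */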
2026
2027 /*******************************************************************
2028  External interface.
2029  Does an rpc request on a pipe. The request data is NDR-encoded in
2030  req_data; the reply is returned NDR-encoded in reply_pdu. Splits the
2031  data stream into RPC PDUs and deals with signing/sealing details.
2032  ********************************************************************/
2033
2034 struct rpc_api_pipe_req_state {
2035         struct event_context *ev;
2036         struct rpc_pipe_client *cli;
2037         uint8_t op_num;
2038         uint32_t call_id;
2039         prs_struct *req_data;
2040         uint32_t req_data_sent;
2041         prs_struct outgoing_frag;
2042         prs_struct reply_pdu;
2043 };
2044
2045 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2046 {
2047         prs_mem_free(&s->outgoing_frag);
2048         prs_mem_free(&s->reply_pdu);
2049         return 0;
2050 }
2051
2052 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2053 static void rpc_api_pipe_req_done(struct tevent_req *subreq);
2054 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2055                                   bool *is_last_frag);
2056
2057 struct tevent_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2058                                          struct event_context *ev,
2059                                          struct rpc_pipe_client *cli,
2060                                          uint8_t op_num,
2061                                          prs_struct *req_data)
2062 {
2063         struct tevent_req *req, *subreq;
2064         struct rpc_api_pipe_req_state *state;
2065         NTSTATUS status;
2066         bool is_last_frag;
2067
2068         req = tevent_req_create(mem_ctx, &state,
2069                                 struct rpc_api_pipe_req_state);
2070         if (req == NULL) {
2071                 return NULL;
2072         }
2073         state->ev = ev;
2074         state->cli = cli;
2075         state->op_num = op_num;
2076         state->req_data = req_data;
2077         state->req_data_sent = 0;
2078         state->call_id = get_rpc_call_id();
2079
2080         if (cli->max_xmit_frag
2081             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2082                 /* Server is screwed up ! */
2083                 status = NT_STATUS_INVALID_PARAMETER;
2084                 goto post_status;
2085         }
2086
2087         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2088
2089         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2090                       state, MARSHALL)) {
2091                 goto fail;
2092         }
2093
2094         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2095
2096         status = prepare_next_frag(state, &is_last_frag);
2097         if (!NT_STATUS_IS_OK(status)) {
2098                 goto post_status;
2099         }
2100
2101         if (is_last_frag) {
2102                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2103                                            &state->outgoing_frag,
2104                                            RPC_RESPONSE);
2105                 if (subreq == NULL) {
2106                         goto fail;
2107                 }
2108                 tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
2109         } else {
2110                 subreq = rpc_write_send(
2111                         state, ev, cli->transport,
2112                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2113                         prs_offset(&state->outgoing_frag));
2114                 if (subreq == NULL) {
2115                         goto fail;
2116                 }
2117                 tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
2118                                         req);
2119         }
2120         return req;
2121
2122  post_status:
2123         tevent_req_nterror(req, status);
2124         return tevent_req_post(req, ev);
2125  fail:
2126         TALLOC_FREE(req);
2127         return NULL;
2128 }
2129
2130 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2131                                   bool *is_last_frag)
2132 {
2133         RPC_HDR hdr;
2134         RPC_HDR_REQ hdr_req;
2135         uint32_t data_sent_thistime;
2136         uint16_t auth_len;
2137         uint16_t frag_len;
2138         uint8_t flags = 0;
2139         uint32_t ss_padding;
2140         uint32_t data_left;
2141         char pad[8] = { 0, };
2142         NTSTATUS status;
2143
2144         data_left = prs_offset(state->req_data) - state->req_data_sent;
2145
2146         data_sent_thistime = calculate_data_len_tosend(
2147                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2148
2149         if (state->req_data_sent == 0) {
2150                 flags = RPC_FLG_FIRST;
2151         }
2152
2153         if (data_sent_thistime == data_left) {
2154                 flags |= RPC_FLG_LAST;
2155         }
2156
2157         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2158                 return NT_STATUS_NO_MEMORY;
2159         }
2160
2161         /* Create and marshall the header and request header. */
2162         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2163                      auth_len);
2164
2165         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2166                 return NT_STATUS_NO_MEMORY;
2167         }
2168
2169         /* Create the rpc request RPC_HDR_REQ */
2170         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2171                          state->op_num);
2172
2173         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2174                                 &state->outgoing_frag, 0)) {
2175                 return NT_STATUS_NO_MEMORY;
2176         }
2177
2178         /* Copy in the data, plus any ss padding. */
2179         if (!prs_append_some_prs_data(&state->outgoing_frag,
2180                                       state->req_data, state->req_data_sent,
2181                                       data_sent_thistime)) {
2182                 return NT_STATUS_NO_MEMORY;
2183         }
2184
2185         /* Copy the sign/seal padding data. */
2186         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2187                 return NT_STATUS_NO_MEMORY;
2188         }
2189
2190         /* Generate any auth sign/seal and add the auth footer. */
2191         switch (state->cli->auth->auth_type) {
2192         case PIPE_AUTH_TYPE_NONE:
2193                 status = NT_STATUS_OK;
2194                 break;
2195         case PIPE_AUTH_TYPE_NTLMSSP:
2196         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2197                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2198                                                  &state->outgoing_frag);
2199                 break;
2200         case PIPE_AUTH_TYPE_SCHANNEL:
2201                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2202                                                   &state->outgoing_frag);
2203                 break;
2204         default:
2205                 status = NT_STATUS_INVALID_PARAMETER;
2206                 break;
2207         }
2208
2209         state->req_data_sent += data_sent_thistime;
2210         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2211
2212         return status;
2213 }
2214
2215 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
2216 {
2217         struct tevent_req *req = tevent_req_callback_data(
2218                 subreq, struct tevent_req);
2219         struct rpc_api_pipe_req_state *state = tevent_req_data(
2220                 req, struct rpc_api_pipe_req_state);
2221         NTSTATUS status;
2222         bool is_last_frag;
2223
2224         status = rpc_write_recv(subreq);
2225         TALLOC_FREE(subreq);
2226         if (!NT_STATUS_IS_OK(status)) {
2227                 tevent_req_nterror(req, status);
2228                 return;
2229         }
2230
2231         status = prepare_next_frag(state, &is_last_frag);
2232         if (!NT_STATUS_IS_OK(status)) {
2233                 tevent_req_nterror(req, status);
2234                 return;
2235         }
2236
2237         if (is_last_frag) {
2238                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2239                                            &state->outgoing_frag,
2240                                            RPC_RESPONSE);
2241                 if (tevent_req_nomem(subreq, req)) {
2242                         return;
2243                 }
2244                 tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
2245         } else {
2246                 subreq = rpc_write_send(
2247                         state, state->ev,
2248                         state->cli->transport,
2249                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2250                         prs_offset(&state->outgoing_frag));
2251                 if (tevent_req_nomem(subreq, req)) {
2252                         return;
2253                 }
2254                 tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
2255                                         req);
2256         }
2257 }
2258
2259 static void rpc_api_pipe_req_done(struct tevent_req *subreq)
2260 {
2261         struct tevent_req *req = tevent_req_callback_data(
2262                 subreq, struct tevent_req);
2263         struct rpc_api_pipe_req_state *state = tevent_req_data(
2264                 req, struct rpc_api_pipe_req_state);
2265         NTSTATUS status;
2266
2267         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2268         TALLOC_FREE(subreq);
2269         if (!NT_STATUS_IS_OK(status)) {
2270                 tevent_req_nterror(req, status);
2271                 return;
2272         }
2273         tevent_req_done(req);
2274 }
2275
2276 NTSTATUS rpc_api_pipe_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2277                                prs_struct *reply_pdu)
2278 {
2279         struct rpc_api_pipe_req_state *state = tevent_req_data(
2280                 req, struct rpc_api_pipe_req_state);
2281         NTSTATUS status;
2282
2283         if (tevent_req_is_nterror(req, &status)) {
2284                 /*
2285                  * We always have to initialize the reply pdu, even if there is
2286                  * none. The rpccli_* caller routines expect this.
2287                  */
2288                 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2289                 return status;
2290         }
2291
2292         *reply_pdu = state->reply_pdu;
2293         reply_pdu->mem_ctx = mem_ctx;
2294
2295         /*
2296          * Prevent state->reply_pdu from being freed in
2297          * rpc_api_pipe_req_state_destructor()
2298          */
2299         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2300
2301         return NT_STATUS_OK;
2302 }
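
/*
 * Illustrative sketch of the external interface (disabled, not part of
 * the original code): the caller marshals the NDR request into a
 * prs_struct, issues rpc_api_pipe_req_send() with the operation number,
 * and collects the reply PDU in a callback. example_issue_request,
 * example_req_done and example_opnum are hypothetical names.
 */
#if 0
static void example_req_done(struct tevent_req *subreq)
{
        prs_struct reply_pdu;
        NTSTATUS status;

        status = rpc_api_pipe_req_recv(subreq, talloc_tos(), &reply_pdu);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                return;
        }
        /* reply_pdu holds the NDR-encoded response - parse it here. */
        prs_mem_free(&reply_pdu);
}

static bool example_issue_request(struct event_context *ev,
                                  struct rpc_pipe_client *cli,
                                  uint8_t example_opnum,
                                  prs_struct *req_data)
{
        struct tevent_req *subreq;

        subreq = rpc_api_pipe_req_send(talloc_tos(), ev, cli,
                                       example_opnum, req_data);
        if (subreq == NULL) {
                return false;
        }
        tevent_req_set_callback(subreq, example_req_done, NULL);
        return true;
}
#endif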
2303
2304 #if 0
2305 /****************************************************************************
2306  Set the handle state.
2307 ****************************************************************************/
2308
2309 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2310                                    const char *pipe_name, uint16 device_state)
2311 {
2312         bool state_set = False;
2313         char param[2];
2314         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2315         char *rparam = NULL;
2316         char *rdata = NULL;
2317         uint32 rparam_len, rdata_len;
2318
2319         if (pipe_name == NULL)
2320                 return False;
2321
2322         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2323                  cli->fnum, pipe_name, device_state));
2324
2325         /* create parameters: device state */
2326         SSVAL(param, 0, device_state);
2327
2328         /* create setup parameters. */
2329         setup[0] = 0x0001; 
2330         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2331
2332         /* send the data on \PIPE\ */
2333         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2334                     setup, 2, 0,                /* setup, length, max */
2335                     param, 2, 0,                /* param, length, max */
2336                     NULL, 0, 1024,              /* data, length, max */
2337                     &rparam, &rparam_len,        /* return param, length */
2338                     &rdata, &rdata_len))         /* return data, length */
2339         {
2340                 DEBUG(5, ("Set Handle state: return OK\n"));
2341                 state_set = True;
2342         }
2343
2344         SAFE_FREE(rparam);
2345         SAFE_FREE(rdata);
2346
2347         return state_set;
2348 }
2349 #endif
2350
2351 /****************************************************************************
2352  Check the rpc bind acknowledge response.
2353 ****************************************************************************/
2354
2355 static bool check_bind_response(RPC_HDR_BA *hdr_ba,
2356                                 const struct ndr_syntax_id *transfer)
2357 {
2358         if ( hdr_ba->addr.len == 0) {
2359                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2360         }
2361
2362         /* check the transfer syntax */
2363         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2364              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2365                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2366                 return False;
2367         }
2368
2369         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2370                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2371                           hdr_ba->res.num_results, hdr_ba->res.reason));
2372         }
2373
2374         DEBUG(5,("check_bind_response: accepted!\n"));
2375         return True;
2376 }
2377
2378 /*******************************************************************
2379  Creates a DCE/RPC bind authentication response.
2380  This is the packet that is sent back to the server once we
2381  have received a BIND-ACK, to finish the third leg of
2382  the authentication handshake.
2383  ********************************************************************/
2384
2385 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2386                                 uint32 rpc_call_id,
2387                                 enum pipe_auth_type auth_type,
2388                                 enum pipe_auth_level auth_level,
2389                                 DATA_BLOB *pauth_blob,
2390                                 prs_struct *rpc_out)
2391 {
2392         RPC_HDR hdr;
2393         RPC_HDR_AUTH hdr_auth;
2394         uint32 pad = 0;
2395
2396         /* Create the request RPC_HDR */
2397         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2398                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2399                      pauth_blob->length );
2400
2401         /* Marshall it. */
2402         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2403                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2404                 return NT_STATUS_NO_MEMORY;
2405         }
2406
2407         /*
2408                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2409                 about padding - shouldn't this pad to length 8 ? JRA.
2410         */
2411
2412         /* 4 bytes padding. */
2413         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2414                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2415                 return NT_STATUS_NO_MEMORY;
2416         }
2417
2418         /* Create the request RPC_HDR_AUTH */
2419         init_rpc_hdr_auth(&hdr_auth,
2420                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2421                         auth_level, 0, 1);
2422
2423         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2424                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2425                 return NT_STATUS_NO_MEMORY;
2426         }
2427
2428         /*
2429          * Append the auth data to the outgoing buffer.
2430          */
2431
2432         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2433                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2434                 return NT_STATUS_NO_MEMORY;
2435         }
2436
2437         return NT_STATUS_OK;
2438 }
2439
2440 /*******************************************************************
2441  Creates a DCE/RPC bind alter context authentication request which
2442  may contain a SPNEGO auth blob.
2443  ********************************************************************/
2444
2445 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2446                                         const struct ndr_syntax_id *abstract,
2447                                         const struct ndr_syntax_id *transfer,
2448                                         enum pipe_auth_level auth_level,
2449                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2450                                         prs_struct *rpc_out)
2451 {
2452         RPC_HDR_AUTH hdr_auth;
2453         prs_struct auth_info;
2454         NTSTATUS ret = NT_STATUS_OK;
2455
2456         ZERO_STRUCT(hdr_auth);
2457         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2458                 return NT_STATUS_NO_MEMORY;
2459
2460         /* We may change the pad length before marshalling. */
2461         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2462
2463         if (pauth_blob->length) {
2464                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2465                         prs_mem_free(&auth_info);
2466                         return NT_STATUS_NO_MEMORY;
2467                 }
2468         }
2469
2470         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2471                                                 rpc_out, 
2472                                                 rpc_call_id,
2473                                                 abstract,
2474                                                 transfer,
2475                                                 &hdr_auth,
2476                                                 &auth_info);
2477         prs_mem_free(&auth_info);
2478         return ret;
2479 }
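
/*
 * Summary of the authenticated bind sequences driven by the code below
 * (rpc_pipe_bind_send() and its completion functions):
 *
 *   NTLMSSP (3 legs):
 *     BIND  (NTLMSSP NEGOTIATE)      -->
 *                                    <--  BIND_ACK (NTLMSSP CHALLENGE)
 *     AUTH3 (NTLMSSP AUTH)           -->  (no reply expected)
 *
 *   SPNEGO/NTLMSSP (4 legs):
 *     BIND  (negTokenInit/NEGOTIATE) -->
 *                                    <--  BIND_ACK (CHALLENGE)
 *     ALTER_CONTEXT (AUTH)           -->
 *                                    <--  ALTER_CONTEXT response
 */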
2480
2481 /****************************************************************************
2482  Do an rpc bind.
2483 ****************************************************************************/
2484
2485 struct rpc_pipe_bind_state {
2486         struct event_context *ev;
2487         struct rpc_pipe_client *cli;
2488         prs_struct rpc_out;
2489         uint32_t rpc_call_id;
2490 };
2491
2492 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2493 {
2494         prs_mem_free(&state->rpc_out);
2495         return 0;
2496 }
2497
2498 static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq);
2499 static NTSTATUS rpc_finish_auth3_bind_send(struct tevent_req *req,
2500                                            struct rpc_pipe_bind_state *state,
2501                                            struct rpc_hdr_info *phdr,
2502                                            prs_struct *reply_pdu);
2503 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2504 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct tevent_req *req,
2505                                                     struct rpc_pipe_bind_state *state,
2506                                                     struct rpc_hdr_info *phdr,
2507                                                     prs_struct *reply_pdu);
2508 static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq);
2509
2510 struct tevent_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2511                                       struct event_context *ev,
2512                                       struct rpc_pipe_client *cli,
2513                                       struct cli_pipe_auth_data *auth)
2514 {
2515         struct tevent_req *req, *subreq;
2516         struct rpc_pipe_bind_state *state;
2517         NTSTATUS status;
2518
2519         req = tevent_req_create(mem_ctx, &state, struct rpc_pipe_bind_state);
2520         if (req == NULL) {
2521                 return NULL;
2522         }
2523
2524         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2525                 rpccli_pipe_txt(debug_ctx(), cli),
2526                 (unsigned int)auth->auth_type,
2527                 (unsigned int)auth->auth_level ));
2528
2529         state->ev = ev;
2530         state->cli = cli;
2531         state->rpc_call_id = get_rpc_call_id();
2532
2533         prs_init_empty(&state->rpc_out, state, MARSHALL);
2534         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2535
2536         cli->auth = talloc_move(cli, &auth);
2537
2538         /* Marshall the outgoing data. */
2539         status = create_rpc_bind_req(cli, &state->rpc_out,
2540                                      state->rpc_call_id,
2541                                      &cli->abstract_syntax,
2542                                      &cli->transfer_syntax,
2543                                      cli->auth->auth_type,
2544                                      cli->auth->auth_level);
2545
2546         if (!NT_STATUS_IS_OK(status)) {
2547                 goto post_status;
2548         }
2549
2550         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2551                                    RPC_BINDACK);
2552         if (subreq == NULL) {
2553                 goto fail;
2554         }
2555         tevent_req_set_callback(subreq, rpc_pipe_bind_step_one_done, req);
2556         return req;
2557
2558  post_status:
2559         tevent_req_nterror(req, status);
2560         return tevent_req_post(req, ev);
2561  fail:
2562         TALLOC_FREE(req);
2563         return NULL;
2564 }
2565
2566 static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq)
2567 {
2568         struct tevent_req *req = tevent_req_callback_data(
2569                 subreq, struct tevent_req);
2570         struct rpc_pipe_bind_state *state = tevent_req_data(
2571                 req, struct rpc_pipe_bind_state);
2572         prs_struct reply_pdu;
2573         struct rpc_hdr_info hdr;
2574         struct rpc_hdr_ba_info hdr_ba;
2575         NTSTATUS status;
2576
2577         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2578         TALLOC_FREE(subreq);
2579         if (!NT_STATUS_IS_OK(status)) {
2580                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2581                           rpccli_pipe_txt(debug_ctx(), state->cli),
2582                           nt_errstr(status)));
2583                 tevent_req_nterror(req, status);
2584                 return;
2585         }
2586
2587         /* Unmarshall the RPC header */
2588         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2589                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2590                 prs_mem_free(&reply_pdu);
2591                 tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2592                 return;
2593         }
2594
2595         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2596                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2597                           "RPC_HDR_BA.\n"));
2598                 prs_mem_free(&reply_pdu);
2599                 tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2600                 return;
2601         }
2602
2603         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2604                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2605                 prs_mem_free(&reply_pdu);
2606                 tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2607                 return;
2608         }
2609
2610         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2611         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2612
2613         /*
2614          * For authenticated binds we may need to do 3 or 4 leg binds.
2615          */
2616
2617         switch(state->cli->auth->auth_type) {
2618
2619         case PIPE_AUTH_TYPE_NONE:
2620         case PIPE_AUTH_TYPE_SCHANNEL:
2621                 /* Bind complete. */
2622                 prs_mem_free(&reply_pdu);
2623                 tevent_req_done(req);
2624                 break;
2625
2626         case PIPE_AUTH_TYPE_NTLMSSP:
2627                 /* Need to send AUTH3 packet - no reply. */
2628                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2629                                                     &reply_pdu);
2630                 prs_mem_free(&reply_pdu);
2631                 if (!NT_STATUS_IS_OK(status)) {
2632                         tevent_req_nterror(req, status);
2633                 }
2634                 break;
2635
2636         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2637                 /* Need to send alter context request and reply. */
2638                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2639                                                              &reply_pdu);
2640                 prs_mem_free(&reply_pdu);
2641                 if (!NT_STATUS_IS_OK(status)) {
2642                         tevent_req_nterror(req, status);
2643                 }
2644                 break;
2645
2646         case PIPE_AUTH_TYPE_KRB5:
2647                 /* Not implemented - fall through to the error case. */
2648
2649         default:
2650                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2651                          (unsigned int)state->cli->auth->auth_type));
2652                 prs_mem_free(&reply_pdu);
2653                 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2654         }
2655 }
2656
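/*
 * Third leg of an NTLMSSP bind: pull the server's NTLMSSP challenge out
 * of the bind-ack auth trailer, feed it to ntlmssp_update() to build the
 * client response token, marshall that into an RPC_AUTH3 PDU and write
 * it to the pipe.  No reply is expected for AUTH3.
 */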
2657 static NTSTATUS rpc_finish_auth3_bind_send(struct tevent_req *req,
2658                                            struct rpc_pipe_bind_state *state,
2659                                            struct rpc_hdr_info *phdr,
2660                                            prs_struct *reply_pdu)
2661 {
2662         DATA_BLOB server_response = data_blob_null;
2663         DATA_BLOB client_reply = data_blob_null;
2664         struct rpc_hdr_auth_info hdr_auth;
2665         struct tevent_req *subreq;
2666         NTSTATUS status;
2667
2668         if ((phdr->auth_len == 0)
2669             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2670                 return NT_STATUS_INVALID_PARAMETER;
2671         }
2672
2673         if (!prs_set_offset(
2674                     reply_pdu,
2675                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2676                 return NT_STATUS_INVALID_PARAMETER;
2677         }
2678
2679         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2680                 return NT_STATUS_INVALID_PARAMETER;
2681         }
2682
2683         /* TODO - check auth_type/auth_level match. */
2684
2685         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2686         prs_copy_data_out((char *)server_response.data, reply_pdu,
2687                           phdr->auth_len);
2688
2689         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2690                                 server_response, &client_reply);
2691
2692         if (!NT_STATUS_IS_OK(status)) {
2693                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2694                           "blob failed: %s.\n", nt_errstr(status)));
2695                 return status;
2696         }
2697
2698         prs_init_empty(&state->rpc_out, state, MARSHALL);
2699
2700         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2701                                        state->cli->auth->auth_type,
2702                                        state->cli->auth->auth_level,
2703                                        &client_reply, &state->rpc_out);
2704         data_blob_free(&client_reply);
2705
2706         if (!NT_STATUS_IS_OK(status)) {
2707                 return status;
2708         }
2709
2710         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2711                                 (uint8_t *)prs_data_p(&state->rpc_out),
2712                                 prs_offset(&state->rpc_out));
2713         if (subreq == NULL) {
2714                 return NT_STATUS_NO_MEMORY;
2715         }
2716         tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2717         return NT_STATUS_OK;
2718 }
2719
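/*
 * Completion routine for the AUTH3 write.  Once the PDU is on the wire
 * the NTLMSSP bind is complete.
 */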
2720 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2721 {
2722         struct tevent_req *req = tevent_req_callback_data(
2723                 subreq, struct tevent_req);
2724         NTSTATUS status;
2725
2726         status = rpc_write_recv(subreq);
2727         TALLOC_FREE(subreq);
2728         if (!NT_STATUS_IS_OK(status)) {
2729                 tevent_req_nterror(req, status);
2730                 return;
2731         }
2732         tevent_req_done(req);
2733 }
2734
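/*
 * Third leg of a SPNEGO-wrapped NTLMSSP bind: unwrap the SPNEGO challenge
 * returned in the bind-ack, run the inner NTLMSSP blob through
 * ntlmssp_update(), SPNEGO-wrap the client reply and send it in an alter
 * context request.  The alter context response is handled by
 * rpc_bind_ntlmssp_api_done().
 */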
2735 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct tevent_req *req,
2736                                                     struct rpc_pipe_bind_state *state,
2737                                                     struct rpc_hdr_info *phdr,
2738                                                     prs_struct *reply_pdu)
2739 {
2740         DATA_BLOB server_spnego_response = data_blob_null;
2741         DATA_BLOB server_ntlm_response = data_blob_null;
2742         DATA_BLOB client_reply = data_blob_null;
2743         DATA_BLOB tmp_blob = data_blob_null;
2744         struct rpc_hdr_auth_info hdr_auth;
2745         struct tevent_req *subreq;
2746         NTSTATUS status;
2747
2748         if ((phdr->auth_len == 0)
2749             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2750                 return NT_STATUS_INVALID_PARAMETER;
2751         }
2752
2753         /* Process the returned NTLMSSP blob first. */
2754         if (!prs_set_offset(
2755                     reply_pdu,
2756                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2757                 return NT_STATUS_INVALID_PARAMETER;
2758         }
2759
2760         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2761                 return NT_STATUS_INVALID_PARAMETER;
2762         }
2763
2764         server_spnego_response = data_blob(NULL, phdr->auth_len);
2765         prs_copy_data_out((char *)server_spnego_response.data,
2766                           reply_pdu, phdr->auth_len);
2767
2768         /*
2769          * The server might give us back two challenges - tmp_blob is for the
2770          * second.
2771          */
2772         if (!spnego_parse_challenge(server_spnego_response,
2773                                     &server_ntlm_response, &tmp_blob)) {
2774                 data_blob_free(&server_spnego_response);
2775                 data_blob_free(&server_ntlm_response);
2776                 data_blob_free(&tmp_blob);
2777                 return NT_STATUS_INVALID_PARAMETER;
2778         }
2779
2780         /* We're finished with the server spnego response and the tmp_blob. */
2781         data_blob_free(&server_spnego_response);
2782         data_blob_free(&tmp_blob);
2783
2784         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2785                                 server_ntlm_response, &client_reply);
2786
2787         /* Finished with the server_ntlm_response. */
2788         data_blob_free(&server_ntlm_response);
2789
2790         if (!NT_STATUS_IS_OK(status)) {
2791                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2792                           "using server blob failed.\n"));
2793                 data_blob_free(&client_reply);
2794                 return status;
2795         }
2796
2797         /* SPNEGO wrap the client reply. */
2798         tmp_blob = spnego_gen_auth(client_reply);
2799         data_blob_free(&client_reply);
2800         client_reply = tmp_blob;
2801         tmp_blob = data_blob_null;
2802
2803         /* Now prepare the alter context pdu. */
2804         prs_init_empty(&state->rpc_out, state, MARSHALL);
2805
2806         status = create_rpc_alter_context(state->rpc_call_id,
2807                                           &state->cli->abstract_syntax,
2808                                           &state->cli->transfer_syntax,
2809                                           state->cli->auth->auth_level,
2810                                           &client_reply,
2811                                           &state->rpc_out);
2812         data_blob_free(&client_reply);
2813
2814         if (!NT_STATUS_IS_OK(status)) {
2815                 return status;
2816         }
2817
2818         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2819                                    &state->rpc_out, RPC_ALTCONTRESP);
2820         if (subreq == NULL) {
2821                 return NT_STATUS_NO_MEMORY;
2822         }
2823         tevent_req_set_callback(subreq, rpc_bind_ntlmssp_api_done, req);
2824         return NT_STATUS_OK;
2825 }
2826
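/*
 * Completion routine for the alter context exchange.  Pulls the SPNEGO
 * blob out of the auth trailer of the alter context response and checks
 * that the server accepted the NTLMSSP authentication; if so the 4-leg
 * bind is done.
 */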
2827 static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq)
2828 {
2829         struct tevent_req *req = tevent_req_callback_data(
2830                 subreq, struct tevent_req);
2831         struct rpc_pipe_bind_state *state = tevent_req_data(
2832                 req, struct rpc_pipe_bind_state);
2833         DATA_BLOB server_spnego_response = data_blob_null;
2834         DATA_BLOB tmp_blob = data_blob_null;
2835         prs_struct reply_pdu;
2836         struct rpc_hdr_info hdr;
2837         struct rpc_hdr_auth_info hdr_auth;
2838         NTSTATUS status;
2839
2840         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2841         TALLOC_FREE(subreq);
2842         if (!NT_STATUS_IS_OK(status)) {
2843                 tevent_req_nterror(req, status);
2844                 return;
2845         }
2846
2847         /* Get the auth blob from the reply. */
2848         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2849                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2850                           "unmarshall RPC_HDR.\n"));
2851                 tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2852                 return;
2853         }
2854
2855         if (!prs_set_offset(
2856                     &reply_pdu,
2857                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2858                 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2859                 return;
2860         }
2861
2862         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2863                 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2864                 return;
2865         }
2866
2867         server_spnego_response = data_blob(NULL, hdr.auth_len);
2868         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2869                           hdr.auth_len);
2870
2871         /* Check we got a valid auth response. */
2872         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2873                                         OID_NTLMSSP, &tmp_blob)) {
2874                 data_blob_free(&server_spnego_response);
2875                 data_blob_free(&tmp_blob);
2876                 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2877                 return;
2878         }
2879
2880         data_blob_free(&server_spnego_response);
2881         data_blob_free(&tmp_blob);
2882
2883         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2884                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2885         tevent_req_done(req);
2886 }
2887
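/**
 * Collect the result of an async rpc_pipe_bind_send() request.
 */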
2888 NTSTATUS rpc_pipe_bind_recv(struct tevent_req *req)
2889 {
2890         return tevent_req_simple_recv_ntstatus(req);
2891 }
2892
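/**
 * Synchronous wrapper around rpc_pipe_bind_send()/rpc_pipe_bind_recv(),
 * driving the request on a private event context.  A minimal usage
 * sketch, assuming "cli" is an already opened struct rpc_pipe_client:
 *
 *     struct cli_pipe_auth_data *auth;
 *     NTSTATUS status = rpccli_anon_bind_data(cli, &auth);
 *     if (NT_STATUS_IS_OK(status)) {
 *             status = rpc_pipe_bind(cli, auth);
 *     }
 */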
2893 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2894                        struct cli_pipe_auth_data *auth)
2895 {
2896         TALLOC_CTX *frame = talloc_stackframe();
2897         struct event_context *ev;
2898         struct tevent_req *req;
2899         NTSTATUS status = NT_STATUS_OK;
2900
2901         ev = event_context_init(frame);
2902         if (ev == NULL) {
2903                 status = NT_STATUS_NO_MEMORY;
2904                 goto fail;
2905         }
2906
2907         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2908         if (req == NULL) {
2909                 status = NT_STATUS_NO_MEMORY;
2910                 goto fail;
2911         }
2912
2913         if (!tevent_req_poll(req, ev)) {
2914                 status = map_nt_error_from_unix(errno);
2915                 goto fail;
2916         }
2917
2918         status = rpc_pipe_bind_recv(req);
2919  fail:
2920         TALLOC_FREE(frame);
2921         return status;
2922 }
2923
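/**
 * Set the timeout on the SMB connection underlying a named pipe
 * transport.  Returns 0 if this pipe does not run over SMB, otherwise
 * passes the call through to cli_set_timeout() on that connection.
 */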
2924 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2925                                 unsigned int timeout)
2926 {
2927         struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2928
2929         if (cli == NULL) {
2930                 return 0;
2931         }
2932         return cli_set_timeout(cli, timeout);
2933 }
2934
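/**
 * Fetch the NT hash used on this connection: taken from the NTLMSSP
 * state for (SPNEGO-)NTLMSSP authenticated pipes, otherwise derived
 * from the password of the underlying SMB connection.  Returns false
 * if neither source is available.
 */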
2935 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2936 {
2937         struct cli_state *cli;
2938
2939         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2940             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2941                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2942                 return true;
2943         }
2944
2945         cli = rpc_pipe_np_smb_conn(rpc_cli);
2946         if (cli == NULL) {
2947                 return false;
2948         }
2949         E_md4hash(cli->password ? cli->password : "", nt_hash);
2950         return true;
2951 }
2952
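/**
 * Create the auth data for an anonymous (unauthenticated) bind:
 * auth type and level NONE, empty username and domain.
 */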
2953 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2954                                struct cli_pipe_auth_data **presult)
2955 {
2956         struct cli_pipe_auth_data *result;
2957
2958         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2959         if (result == NULL) {
2960                 return NT_STATUS_NO_MEMORY;
2961         }
2962
2963         result->auth_type = PIPE_AUTH_TYPE_NONE;
2964         result->auth_level = PIPE_AUTH_LEVEL_NONE;
2965
2966         result->user_name = talloc_strdup(result, "");
2967         result->domain = talloc_strdup(result, "");
2968         if ((result->user_name == NULL) || (result->domain == NULL)) {
2969                 TALLOC_FREE(result);
2970                 return NT_STATUS_NO_MEMORY;
2971         }
2972
2973         *presult = result;
2974         return NT_STATUS_OK;
2975 }
2976
2977 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
2978 {
2979         ntlmssp_end(&auth->a_u.ntlmssp_state);
2980         return 0;
2981 }
2982
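/**
 * Create the auth data for an NTLMSSP or SPNEGO-wrapped NTLMSSP bind
 * (selected via auth_type) and initialise the client-side NTLMSSP state
 * with the given credentials.  The sign/seal negotiation flags are
 * derived from the requested auth level.
 */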
2983 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
2984                                   enum pipe_auth_type auth_type,
2985                                   enum pipe_auth_level auth_level,
2986                                   const char *domain,
2987                                   const char *username,
2988                                   const char *password,
2989                                   struct cli_pipe_auth_data **presult)
2990 {
2991         struct cli_pipe_auth_data *result;
2992         NTSTATUS status;
2993
2994         result = talloc(mem_ctx, struct cli_pipe_auth_data);
2995         if (result == NULL) {
2996                 return NT_STATUS_NO_MEMORY;
2997         }
2998
2999         result->auth_type = auth_type;
3000         result->auth_level = auth_level;
3001
3002         result->user_name = talloc_strdup(result, username);
3003         result->domain = talloc_strdup(result, domain);
3004         if ((result->user_name == NULL) || (result->domain == NULL)) {
3005                 status = NT_STATUS_NO_MEMORY;
3006                 goto fail;
3007         }
3008
3009         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3010         if (!NT_STATUS_IS_OK(status)) {
3011                 goto fail;
3012         }
3013
3014         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3015
3016         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3017         if (!NT_STATUS_IS_OK(status)) {
3018                 goto fail;
3019         }
3020
3021         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3022         if (!NT_STATUS_IS_OK(status)) {
3023                 goto fail;
3024         }
3025
3026         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3027         if (!NT_STATUS_IS_OK(status)) {
3028                 goto fail;
3029         }
3030
3031         /*
3032          * Turn off sign+seal to allow selected auth level to turn it back on.
3033          */
3034         result->a_u.ntlmssp_state->neg_flags &=
3035                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3036
3037         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3038                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3039         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3040                 result->a_u.ntlmssp_state->neg_flags
3041                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3042         }
3043
3044         *presult = result;
3045         return NT_STATUS_OK;
3046
3047  fail:
3048         TALLOC_FREE(result);
3049         return status;
3050 }
3051
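/**
 * Create the auth data for a schannel bind from the supplied 16 byte
 * session key.
 */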
3052 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3053                                    enum pipe_auth_level auth_level,
3054                                    const uint8_t sess_key[16],
3055                                    struct cli_pipe_auth_data **presult)
3056 {
3057         struct cli_pipe_auth_data *result;
3058
3059         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3060         if (result == NULL) {
3061                 return NT_STATUS_NO_MEMORY;
3062         }
3063
3064         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3065         result->auth_level = auth_level;
3066
3067         result->user_name = talloc_strdup(result, "");
3068         result->domain = talloc_strdup(result, domain);
3069         if ((result->user_name == NULL) || (result->domain == NULL)) {
3070                 goto fail;
3071         }
3072
3073         result->a_u.schannel_auth = talloc(result,
3074                                            struct schannel_auth_struct);
3075         if (result->a_u.schannel_auth == NULL) {
3076                 goto fail;
3077         }
3078
3079         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3080                sizeof(result->a_u.schannel_auth->sess_key));
3081         result->a_u.schannel_auth->seq_num = 0;
3082
3083         *presult = result;
3084         return NT_STATUS_OK;
3085
3086  fail:
3087         TALLOC_FREE(result);
3088         return NT_STATUS_NO_MEMORY;
3089 }
3090
3091 #ifdef HAVE_KRB5
3092 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3093 {
3094         data_blob_free(&auth->session_key);
3095         return 0;
3096 }
3097 #endif
3098
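/**
 * Create the auth data for a Kerberos (KRB5) bind.  If username and
 * password are supplied, kerberos_kinit_password() is called first.
 * Without HAVE_KRB5 this returns NT_STATUS_NOT_SUPPORTED.
 */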
3099 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3100                                    enum pipe_auth_level auth_level,
3101                                    const char *service_princ,
3102                                    const char *username,
3103                                    const char *password,
3104                                    struct cli_pipe_auth_data **presult)
3105 {
3106 #ifdef HAVE_KRB5
3107         struct cli_pipe_auth_data *result;
3108
3109         if ((username != NULL) && (password != NULL)) {
3110                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3111                 if (ret != 0) {
3112                         return NT_STATUS_ACCESS_DENIED;
3113                 }
3114         }
3115
3116         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3117         if (result == NULL) {
3118                 return NT_STATUS_NO_MEMORY;
3119         }
3120
3121         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3122         result->auth_level = auth_level;
3123
3124         /*
3125          * Username / domain need fixing!
3126          */
3127         result->user_name = talloc_strdup(result, "");
3128         result->domain = talloc_strdup(result, "");
3129         if ((result->user_name == NULL) || (result->domain == NULL)) {
3130                 goto fail;
3131         }
3132
3133         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3134                 result, struct kerberos_auth_struct);
3135         if (result->a_u.kerberos_auth == NULL) {
3136                 goto fail;
3137         }
3138         talloc_set_destructor(result->a_u.kerberos_auth,
3139                               cli_auth_kerberos_data_destructor);
3140
3141         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3142                 result, service_princ);
3143         if (result->a_u.kerberos_auth->service_principal == NULL) {
3144                 goto fail;
3145         }
3146
3147         *presult = result;
3148         return NT_STATUS_OK;
3149
3150  fail:
3151         TALLOC_FREE(result);
3152         return NT_STATUS_NO_MEMORY;
3153 #else
3154         return NT_STATUS_NOT_SUPPORTED;
3155 #endif
3156 }
3157
3158 /**
3159  * Create an rpc pipe client struct, connecting to a tcp port.
3160  */
3161 sta