/* 
   Unix SMB/CIFS implementation.
   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "system/time.h"
#include "dlinklist.h"
#include "events.h"


static void smbcli_transport_process_recv(struct smbcli_transport *transport);
static void smbcli_transport_process_send(struct smbcli_transport *transport);

/*
  an event has happened on the socket
*/
static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde, 
                                           struct timeval t, uint16_t flags)
{
        struct smbcli_transport *transport = fde->private;

        if (flags & EVENT_FD_READ) {
                smbcli_transport_process_recv(transport);
        }
        if (flags & EVENT_FD_WRITE) {
                smbcli_transport_process_send(transport);
        }
}

/*
  destroy a transport
 */
static int transport_destructor(void *ptr)
{
        struct smbcli_transport *transport = ptr;

        smbcli_transport_dead(transport);
        event_remove_fd(transport->socket->event.ctx, transport->socket->event.fde);
        event_remove_timed(transport->socket->event.ctx, transport->socket->event.te);
        return 0;
}

/*
  create a transport structure based on an established socket
*/
struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
{
        struct smbcli_transport *transport;

        transport = talloc_p(sock, struct smbcli_transport);
        if (!transport) return NULL;

        ZERO_STRUCTP(transport);

        transport->socket = talloc_reference(transport, sock);
        transport->negotiate.protocol = PROTOCOL_NT1;
        transport->options.use_spnego = lp_use_spnego();
        transport->options.max_xmit = lp_max_xmit();
        transport->options.max_mux = lp_maxmux();

        transport->negotiate.max_xmit = transport->options.max_xmit;

        smbcli_init_signing(transport);

        ZERO_STRUCT(transport->called);

        /* take over event handling from the socket layer - it only
           handles events up until we are connected */
        transport->socket->event.fde->handler = smbcli_transport_event_handler;
        transport->socket->event.fde->private = transport;
        transport->socket->event.fde->flags = EVENT_FD_READ;

        talloc_set_destructor(transport, transport_destructor);

        return transport;
}
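
/*
  Illustrative usage sketch (not part of this file's code paths): the
  caller hands smbcli_transport_init() an already connected
  smbcli_socket; how that socket is established lives elsewhere in
  libcli and is only assumed here.

      struct smbcli_transport *transport = smbcli_transport_init(sock);
      if (transport == NULL) {
              return NT_STATUS_NO_MEMORY;
      }

  The transport becomes a talloc child of the socket and also takes a
  talloc reference to it, so talloc_free(transport) runs
  transport_destructor(), which marks the connection dead and removes
  the fd and timed events that were registered on the socket.
*/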

/*
  mark the transport as dead
*/
void smbcli_transport_dead(struct smbcli_transport *transport)
{
        smbcli_sock_dead(transport->socket);

        /* all pending sends become errors */
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                DLIST_REMOVE(transport->pending_send, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }

        /* as do all pending receives */
        while (transport->pending_recv) {
                struct smbcli_request *req = transport->pending_recv;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                DLIST_REMOVE(transport->pending_recv, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }
}


/*
  enable select for write on a transport
*/
static void smbcli_transport_write_enable(struct smbcli_transport *transport)
{
        transport->socket->event.fde->flags |= EVENT_FD_WRITE;
}

/*
  disable select for write on a transport
*/
static void smbcli_transport_write_disable(struct smbcli_transport *transport)
{
        transport->socket->event.fde->flags &= ~EVENT_FD_WRITE;
}

/*
  send a session request
*/
struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
                                                     struct nmb_name *calling, 
                                                     struct nmb_name *called)
{
        uint8_t *p;
        int len = NBT_HDR_SIZE;
        struct smbcli_request *req;

        if (called) {
                transport->called = *called;
        }

        /* allocate output buffer */
        req = smbcli_request_setup_nonsmb(transport, 
                                          NBT_HDR_SIZE + 2*nbt_mangled_name_len());
        if (req == NULL) return NULL;

        /* put in the destination name */
        p = req->out.buffer + NBT_HDR_SIZE;
        name_mangle(called->name, (char *)p, called->name_type);
        len += name_len((char *)p);

        /* and my name */
        p = req->out.buffer+len;
        name_mangle(calling->name, (char *)p, calling->name_type);
        len += name_len((char *)p);

        _smb_setlen(req->out.buffer,len-4);
        SCVAL(req->out.buffer,0,0x81);

        if (!smbcli_request_send(req)) {
                smbcli_request_destroy(req);
                return NULL;
        }

        return req;
}
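
/*
  For reference, the packet built above is an RFC 1002 NetBIOS session
  request: byte 0 carries the packet type 0x81, the rest of the 4 byte
  NBT header carries the payload length, and the payload is the mangled
  called name followed by the mangled calling name.  The server answers
  with one of the packet types handled in smbcli_transport_connect_recv()
  below: 0x82 (positive response), 0x83 (negative response, carrying a
  one byte error code) or 0x84 (session retarget, which this client does
  not follow).
*/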

/*
  map a session request error to a NTSTATUS
 */
static NTSTATUS map_session_refused_error(uint8_t error)
{
        switch (error) {
        case 0x80:
        case 0x81:
                return NT_STATUS_REMOTE_NOT_LISTENING;
        case 0x82:
                return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
        case 0x83:
                return NT_STATUS_REMOTE_RESOURCES;
        }
        return NT_STATUS_UNEXPECTED_IO_ERROR;
}


/*
  finish a smbcli_transport_connect()
*/
NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
{
        NTSTATUS status;

        if (!smbcli_request_receive(req)) {
                smbcli_request_destroy(req);
                return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
        }

        switch (CVAL(req->in.buffer,0)) {
        case 0x82:
                status = NT_STATUS_OK;
                break;
        case 0x83:
                status = map_session_refused_error(CVAL(req->in.buffer,4));
                break;
        case 0x84:
                DEBUG(1,("Warning: session retarget not supported\n"));
                status = NT_STATUS_NOT_SUPPORTED;
                break;
        default:
                status = NT_STATUS_UNEXPECTED_IO_ERROR;
                break;
        }

        smbcli_request_destroy(req);
        return status;
}


/*
  send a session request (if needed)
*/
BOOL smbcli_transport_connect(struct smbcli_transport *transport,
                              struct nmb_name *calling, 
                              struct nmb_name *called)
{
        struct smbcli_request *req;
        NTSTATUS status;

        if (transport->socket->port == 445) {
                return True;
        }

        req = smbcli_transport_connect_send(transport, 
                                            calling, called);
        status = smbcli_transport_connect_recv(req);
        return NT_STATUS_IS_OK(status);
}

/****************************************************************************
get next mid in sequence
****************************************************************************/
uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
{
        uint16_t mid;
        struct smbcli_request *req;

        mid = transport->next_mid;

again:
        /* now check to see if this mid is being used by one of the 
           pending requests. This is quite efficient because the list is
           usually very short */

        /* the zero mid is reserved for requests that don't have a mid */
        if (mid == 0) mid = 1;

        for (req=transport->pending_recv; req; req=req->next) {
                if (req->mid == mid) {
                        mid++;
                        goto again;
                }
        }

        transport->next_mid = mid+1;
        return mid;
}

static void idle_handler(struct event_context *ev, 
                         struct timed_event *te, struct timeval t)
{
        struct smbcli_transport *transport = te->private;
        te->next_event = timeval_add(&te->next_event, 0, transport->idle.period);
        transport->idle.func(transport, transport->idle.private);
}

/*
  setup the idle handler for a transport
  the period is in microseconds
*/
void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
                                   void (*idle_func)(struct smbcli_transport *, void *),
                                   uint64_t period,
                                   void *private)
{
        struct timed_event te;
        transport->idle.func = idle_func;
        transport->idle.private = private;
        transport->idle.period = period;

        if (transport->socket->event.te != NULL) {
                event_remove_timed(transport->socket->event.ctx, transport->socket->event.te);
        }

        te.next_event = timeval_current_ofs(0, period);
        te.handler = idle_handler;
        te.private = transport;
        transport->socket->event.te = event_add_timed(transport->socket->event.ctx, &te);
}
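
/*
  Illustrative sketch of registering an idle callback; the callback
  name and the period below are hypothetical and only show the calling
  convention expected by smbcli_transport_idle_handler():

      static void my_idle(struct smbcli_transport *transport, void *private)
      {
              ... send a keepalive, kick a request queue, etc ...
      }

      smbcli_transport_idle_handler(transport, my_idle, 50000, NULL);

  idle_handler() re-arms the timed event each time it fires, so the
  callback keeps running every 'period' microseconds until the timed
  event is removed or replaced.
*/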

/*
  process some pending sends
*/
static void smbcli_transport_process_send(struct smbcli_transport *transport)
{
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                ssize_t ret;
                ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
                if (ret == -1) {
                        if (errno == EAGAIN || errno == EINTR) {
                                return;
                        }
                        smbcli_transport_dead(transport);
                        return;
                }
                req->out.buffer += ret;
                req->out.size -= ret;
                if (req->out.size == 0) {
                        DLIST_REMOVE(transport->pending_send, req);
                        if (req->one_way_request) {
                                req->state = SMBCLI_REQUEST_DONE;
                                smbcli_request_destroy(req);
                        } else {
                                req->state = SMBCLI_REQUEST_RECV;
                                DLIST_ADD(transport->pending_recv, req);
                        }
                }
        }

        /* we're out of requests to send, so don't wait for write
           events any more */
        smbcli_transport_write_disable(transport);
}

/*
  we have a full request in our receive buffer - match it to a pending request
  and process
 */
static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
{
        uint8_t *buffer, *hdr, *vwv;
        int len;
        uint16_t wct=0, mid = 0;
        struct smbcli_request *req;

        buffer = transport->recv_buffer.buffer;
        len = transport->recv_buffer.req_size;

        ZERO_STRUCT(transport->recv_buffer);

        hdr = buffer+NBT_HDR_SIZE;
        vwv = hdr + HDR_VWV;

        /* see if it could be an oplock break request */
        if (handle_oplock_break(transport, len, hdr, vwv)) {
                talloc_free(buffer);
                return;
        }

        /* at this point we need to check for a readbraw reply, as
           these can be any length */
        if (transport->readbraw_pending) {
                transport->readbraw_pending = 0;

                /* it must match the first entry in the pending queue
                   as the client is not allowed to have outstanding
                   readbraw requests */
                req = transport->pending_recv;
                if (!req) goto error;

                req->in.buffer = buffer;
                talloc_steal(req, buffer);
                req->in.size = len;
                req->in.allocated = req->in.size;
                goto async;
        }

        if (len >= MIN_SMB_SIZE) {
                /* extract the mid for matching to pending requests */
                mid = SVAL(hdr, HDR_MID);
                wct = CVAL(hdr, HDR_WCT);
        }

        /* match the incoming request against the list of pending requests */
        for (req=transport->pending_recv; req; req=req->next) {
                if (req->mid == mid) break;
        }

        if (!req) {
                DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", 
                         mid, CVAL(hdr, HDR_COM)));
                goto error;
        }

        /* fill in the 'in' portion of the matching request */
        req->in.buffer = buffer;
        talloc_steal(req, buffer);
        req->in.size = len;
        req->in.allocated = req->in.size;

        /* handle NBT session replies */
        if (req->in.size >= 4 && req->in.buffer[0] != 0) {
                req->status = NT_STATUS_OK;
                goto async;
        }

        /* handle non-SMB replies */
        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
                DEBUG(2,("bad reply size for mid %d\n", mid));
                req->status = NT_STATUS_UNSUCCESSFUL;
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        req->in.hdr = hdr;
        req->in.vwv = vwv;
        req->in.wct = wct;
        if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
                req->in.data = req->in.vwv + VWV(wct) + 2;
                req->in.data_size = SVAL(req->in.vwv, VWV(wct));
                if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
                        DEBUG(3,("bad data size for mid %d\n", mid));
                        /* blergh - w2k3 gives bogus data size values in some
                           openX replies */
                        req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
                }
        }
        req->in.ptr = req->in.data;
        req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

        if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
                transport->error.etype = ETYPE_DOS;
                transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
                transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
                req->status = dos_to_ntstatus(transport->error.e.dos.eclass, 
                                              transport->error.e.dos.ecode);
        } else {
                transport->error.etype = ETYPE_NT;
                transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
                req->status = transport->error.e.nt_status;
        }

        if (!smbcli_request_check_sign_mac(req)) {
                transport->error.etype = ETYPE_SOCKET;
                transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_ACCESS_DENIED;
                goto error;
        }

async:
        /* if this request has an async handler then call that to
           notify that the reply has been received. This might destroy
           the request so it must happen last */
        DLIST_REMOVE(transport->pending_recv, req);
        req->state = SMBCLI_REQUEST_DONE;
        if (req->async.fn) {
                req->async.fn(req);
        }
        return;

error:
        if (req) {
                DLIST_REMOVE(transport->pending_recv, req);
                req->state = SMBCLI_REQUEST_ERROR;
        }
}

/*
  process some pending receives
*/
static void smbcli_transport_process_recv(struct smbcli_transport *transport)
{
        /* an incoming packet goes through 2 stages - first we read the
           4 byte header, which tells us how much more is coming. Then
           we read the rest */
        if (transport->recv_buffer.received < NBT_HDR_SIZE) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket, 
                                    transport->recv_buffer.header + 
                                    transport->recv_buffer.received,
                                    NBT_HDR_SIZE - transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }

                transport->recv_buffer.received += ret;

                if (transport->recv_buffer.received == NBT_HDR_SIZE) {
                        /* we've got a full header */
                        transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
                        transport->recv_buffer.buffer = talloc_size(transport,
                                                                    NBT_HDR_SIZE+transport->recv_buffer.req_size);
                        if (transport->recv_buffer.buffer == NULL) {
                                smbcli_transport_dead(transport);
                                return;
                        }
                        memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
                }
        }

        if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket, 
                                    transport->recv_buffer.buffer + 
                                    transport->recv_buffer.received,
                                    transport->recv_buffer.req_size - 
                                    transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }
                transport->recv_buffer.received += ret;
        }

        if (transport->recv_buffer.received != 0 &&
            transport->recv_buffer.received == transport->recv_buffer.req_size) {
                smbcli_transport_finish_recv(transport);
        }
}
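
/*
  For reference, the 4 byte header read above is the NBT session
  header: a one byte packet type, a flags byte and the payload length,
  which smb_len() extracts, so req_size covers the header plus the SMB
  payload.  A short read simply leaves 'received' below req_size; the
  fd event stays armed for EVENT_FD_READ, so this function runs again
  when more data arrives and only calls smbcli_transport_finish_recv()
  once the packet is complete.
*/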

/*
  process some read/write requests that are pending
  return False if the socket is dead
*/
BOOL smbcli_transport_process(struct smbcli_transport *transport)
{
        smbcli_transport_process_send(transport);
        smbcli_transport_process_recv(transport);
        if (transport->socket->sock == NULL) {
                return False;
        }
        return True;
}



/*
  put a request into the send queue
*/
void smbcli_transport_send(struct smbcli_request *req)
{
        /* check if the transport is dead */
        if (req->transport->socket->sock == NULL) {
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                return;
        }

        /* put it on the outgoing socket queue */
        req->state = SMBCLI_REQUEST_SEND;
        DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);

        /* make sure we look for write events */
        smbcli_transport_write_enable(req->transport);
}
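
/*
  Illustrative sketch of the asynchronous calling convention implied by
  the queues above; the callback name is hypothetical:

      static void my_reply_handler(struct smbcli_request *req)
      {
              ... on success req->state is SMBCLI_REQUEST_DONE; if the
                  transport died or the reply was rejected it is
                  SMBCLI_REQUEST_ERROR with req->status set ...
      }

      req->async.fn = my_reply_handler;
      smbcli_transport_send(req);

  smbcli_transport_process_send() writes the request out and moves it
  to pending_recv, and smbcli_transport_finish_recv() matches the reply
  by mid and invokes the callback.  Callers that prefer to block can
  instead drive the connection with smbcli_transport_process() until
  the request leaves the SMBCLI_REQUEST_RECV state.
*/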