r4811: now that the event context is at the socket level, the event cleanup
[jelmer/samba4-debian.git] / source / libcli / raw / clitransport.c
1 /* 
2    Unix SMB/CIFS implementation.
3    SMB client transport context management functions
4
5    Copyright (C) Andrew Tridgell 1994-2005
6    Copyright (C) James Myers 2003 <myersjj@samba.org>
7    
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 2 of the License, or
11    (at your option) any later version.
12    
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17    
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, write to the Free Software
20    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 #include "includes.h"
24 #include "libcli/raw/libcliraw.h"
25 #include "system/time.h"
26 #include "dlinklist.h"
27 #include "events.h"
28
29
30 static void smbcli_transport_process_recv(struct smbcli_transport *transport);
31 static void smbcli_transport_process_send(struct smbcli_transport *transport);
32
33 /*
34   an event has happened on the socket
35 */
36 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde, 
37                                            struct timeval t, uint16_t flags)
38 {
39         struct smbcli_transport *transport = fde->private;
40
41         if (flags & EVENT_FD_READ) {
42                 smbcli_transport_process_recv(transport);
43         }
44         if (flags & EVENT_FD_WRITE) {
45                 smbcli_transport_process_send(transport);
46         }
47 }
48
/*
  destroy a transport

  talloc destructor: errors out any in-flight requests and removes the
  idle timer before the transport memory is released
 */
static int transport_destructor(void *ptr)
{
	struct smbcli_transport *transport = ptr;

	/* kill the socket and fail all pending send/recv requests while
	   the transport is still valid */
	smbcli_transport_dead(transport);
	/* drop the idle timed event installed by
	   smbcli_transport_idle_handler(); the event context lives on the
	   socket, which can outlive this transport.
	   NOTE(review): assumes event_remove_timed() tolerates a NULL te
	   when no idle handler was ever registered — confirm */
	event_remove_timed(transport->socket->event.ctx, transport->socket->event.te);
	return 0;
}
60
61 /*
62   create a transport structure based on an established socket
63 */
64 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
65 {
66         struct smbcli_transport *transport;
67
68         transport = talloc_p(sock, struct smbcli_transport);
69         if (!transport) return NULL;
70
71         ZERO_STRUCTP(transport);
72
73         transport->socket = talloc_reference(transport, sock);
74         transport->negotiate.protocol = PROTOCOL_NT1;
75         transport->options.use_spnego = lp_use_spnego();
76         transport->options.max_xmit = lp_max_xmit();
77         transport->options.max_mux = lp_maxmux();
78
79         transport->negotiate.max_xmit = transport->options.max_xmit;
80         
81         smbcli_init_signing(transport);
82
83         ZERO_STRUCT(transport->called);
84
85         /* take over event handling from the socket layer - it only
86            handles events up until we are connected */
87         transport->socket->event.fde->handler = smbcli_transport_event_handler;
88         transport->socket->event.fde->private = transport;
89         transport->socket->event.fde->flags = EVENT_FD_READ;
90
91         talloc_set_destructor(transport, transport_destructor);
92
93         return transport;
94 }
95
96 /*
97   mark the transport as dead
98 */
99 void smbcli_transport_dead(struct smbcli_transport *transport)
100 {
101         smbcli_sock_dead(transport->socket);
102
103         /* all pending sends become errors */
104         while (transport->pending_send) {
105                 struct smbcli_request *req = transport->pending_send;
106                 req->state = SMBCLI_REQUEST_ERROR;
107                 req->status = NT_STATUS_NET_WRITE_FAULT;
108                 DLIST_REMOVE(transport->pending_send, req);
109                 if (req->async.fn) {
110                         req->async.fn(req);
111                 }
112         }
113
114         /* as do all pending receives */
115         while (transport->pending_recv) {
116                 struct smbcli_request *req = transport->pending_recv;
117                 req->state = SMBCLI_REQUEST_ERROR;
118                 req->status = NT_STATUS_NET_WRITE_FAULT;
119                 DLIST_REMOVE(transport->pending_recv, req);
120                 if (req->async.fn) {
121                         req->async.fn(req);
122                 }
123         }
124 }
125
126
127 /*
128   enable select for write on a transport
129 */
130 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
131 {
132         transport->socket->event.fde->flags |= EVENT_FD_WRITE;
133 }
134
135 /*
136   disable select for write on a transport
137 */
138 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
139 {
140         transport->socket->event.fde->flags &= ~EVENT_FD_WRITE;
141 }
142
143 /*
144   send a session request
145 */
146 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
147                                                      struct nmb_name *calling, 
148                                                      struct nmb_name *called)
149 {
150         uint8_t *p;
151         int len = NBT_HDR_SIZE;
152         struct smbcli_request *req;
153
154         if (called) {
155                 transport->called = *called;
156         }
157
158         /* allocate output buffer */
159         req = smbcli_request_setup_nonsmb(transport, 
160                                           NBT_HDR_SIZE + 2*nbt_mangled_name_len());
161         if (req == NULL) return NULL;
162
163         /* put in the destination name */
164         p = req->out.buffer + NBT_HDR_SIZE;
165         name_mangle(called->name, (char *)p, called->name_type);
166         len += name_len((char *)p);
167
168         /* and my name */
169         p = req->out.buffer+len;
170         name_mangle(calling->name, (char *)p, calling->name_type);
171         len += name_len((char *)p);
172
173         _smb_setlen(req->out.buffer,len-4);
174         SCVAL(req->out.buffer,0,0x81);
175
176         if (!smbcli_request_send(req)) {
177                 smbcli_request_destroy(req);
178                 return NULL;
179         }
180
181         return req;
182 }
183
184 /*
185   map a session request error to a NTSTATUS
186  */
187 static NTSTATUS map_session_refused_error(uint8_t error)
188 {
189         switch (error) {
190         case 0x80:
191         case 0x81:
192                 return NT_STATUS_REMOTE_NOT_LISTENING;
193         case 0x82:
194                 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
195         case 0x83:
196                 return NT_STATUS_REMOTE_RESOURCES;
197         }
198         return NT_STATUS_UNEXPECTED_IO_ERROR;
199 }
200
201
202 /*
203   finish a smbcli_transport_connect()
204 */
205 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
206 {
207         NTSTATUS status;
208
209         if (!smbcli_request_receive(req)) {
210                 smbcli_request_destroy(req);
211                 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
212         }
213
214         switch (CVAL(req->in.buffer,0)) {
215         case 0x82:
216                 status = NT_STATUS_OK;
217                 break;
218         case 0x83:
219                 status = map_session_refused_error(CVAL(req->in.buffer,4));
220                 break;
221         case 0x84:
222                 DEBUG(1,("Warning: session retarget not supported\n"));
223                 status = NT_STATUS_NOT_SUPPORTED;
224                 break;
225         default:
226                 status = NT_STATUS_UNEXPECTED_IO_ERROR;
227                 break;
228         }
229
230         smbcli_request_destroy(req);
231         return status;
232 }
233
234
235 /*
236   send a session request (if needed)
237 */
238 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
239                               struct nmb_name *calling, 
240                               struct nmb_name *called)
241 {
242         struct smbcli_request *req;
243         NTSTATUS status;
244
245         if (transport->socket->port == 445) {
246                 return True;
247         }
248
249         req = smbcli_transport_connect_send(transport, 
250                                             calling, called);
251         status = smbcli_transport_connect_recv(req);
252         return NT_STATUS_IS_OK(status);
253 }
254
255 /****************************************************************************
256 get next mid in sequence
257 ****************************************************************************/
258 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
259 {
260         uint16_t mid;
261         struct smbcli_request *req;
262
263         mid = transport->next_mid;
264
265 again:
266         /* now check to see if this mid is being used by one of the 
267            pending requests. This is quite efficient because the list is
268            usually very short */
269
270         /* the zero mid is reserved for requests that don't have a mid */
271         if (mid == 0) mid = 1;
272
273         for (req=transport->pending_recv; req; req=req->next) {
274                 if (req->mid == mid) {
275                         mid++;
276                         goto again;
277                 }
278         }
279
280         transport->next_mid = mid+1;
281         return mid;
282 }
283
/*
  timed-event callback driving the transport's idle function; it
  re-arms itself each time it fires
*/
static void idle_handler(struct event_context *ev, 
			 struct timed_event *te, struct timeval t)
{
	struct smbcli_transport *transport = te->private;
	/* advance relative to the previous deadline rather than 'now',
	   so the period does not drift with callback latency */
	te->next_event = timeval_add(&te->next_event, 0, transport->idle.period);
	transport->idle.func(transport, transport->idle.private);
}
291
/*
  setup the idle handler for a transport
  the period is in microseconds

  replaces any previously installed idle handler; the timed event is
  registered on the socket's event context and removed again by the
  transport destructor
*/
void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
				   void (*idle_func)(struct smbcli_transport *, void *),
				   uint64_t period,
				   void *private)
{
	/* NOTE(review): te is a stack temporary handed to
	   event_add_timed() - presumably the event layer copies it;
	   confirm, otherwise the stored event would dangle */
	struct timed_event te;
	transport->idle.func = idle_func;
	transport->idle.private = private;
	transport->idle.period = period;

	/* only one idle timer per transport: remove the old one first */
	if (transport->socket->event.te != NULL) {
		event_remove_timed(transport->socket->event.ctx, transport->socket->event.te);
	}

	/* first firing is one period from now; idle_handler() re-arms
	   itself after that.
	   NOTE(review): period is uint64_t but timeval_current_ofs()'s
	   usecs argument may be narrower - check for truncation with
	   very long periods */
	te.next_event = timeval_current_ofs(0, period);
	te.handler = idle_handler;
	te.private = transport;
	transport->socket->event.te = event_add_timed(transport->socket->event.ctx, &te);
}
315
/*
  process some pending sends

  writes as much queued request data as the socket accepts.  a blocked
  or interrupted write (EAGAIN/EINTR) returns and waits for the next
  write event; any other write error kills the whole transport.  fully
  sent requests move to the pending_recv queue, except one-way requests
  which complete (and are destroyed) immediately.
*/
static void smbcli_transport_process_send(struct smbcli_transport *transport)
{
	while (transport->pending_send) {
		struct smbcli_request *req = transport->pending_send;
		ssize_t ret;
		ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
		if (ret == -1) {
			if (errno == EAGAIN || errno == EINTR) {
				/* transient: leave the request queued and
				   retry on the next write event */
				return;
			}
			smbcli_transport_dead(transport);
			return;
		}
		/* partial write: advance the cursor; the request stays at
		   the head of the queue until fully sent */
		req->out.buffer += ret;
		req->out.size -= ret;
		if (req->out.size == 0) {
			DLIST_REMOVE(transport->pending_send, req);
			if (req->one_way_request) {
				req->state = SMBCLI_REQUEST_DONE;
				smbcli_request_destroy(req);
			} else {
				req->state = SMBCLI_REQUEST_RECV;
				DLIST_ADD(transport->pending_recv, req);
			}
		}
	}

	/* we're out of requests to send, so don't wait for write
	   events any more */
	smbcli_transport_write_disable(transport);
}
350
351 /*
352   we have a full request in our receive buffer - match it to a pending request
353   and process
354  */
355 static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
356 {
357         uint8_t *buffer, *hdr, *vwv;
358         int len;
359         uint16_t wct=0, mid = 0;
360         struct smbcli_request *req;
361
362         buffer = transport->recv_buffer.buffer;
363         len = transport->recv_buffer.req_size;
364
365         ZERO_STRUCT(transport->recv_buffer);
366
367         hdr = buffer+NBT_HDR_SIZE;
368         vwv = hdr + HDR_VWV;
369
370         /* see if it could be an oplock break request */
371         if (handle_oplock_break(transport, len, hdr, vwv)) {
372                 talloc_free(buffer);
373                 return;
374         }
375
376         /* at this point we need to check for a readbraw reply, as
377            these can be any length */
378         if (transport->readbraw_pending) {
379                 transport->readbraw_pending = 0;
380
381                 /* it must match the first entry in the pending queue
382                    as the client is not allowed to have outstanding
383                    readbraw requests */
384                 req = transport->pending_recv;
385                 if (!req) goto error;
386
387                 req->in.buffer = buffer;
388                 talloc_steal(req, buffer);
389                 req->in.size = len;
390                 req->in.allocated = req->in.size;
391                 goto async;
392         }
393
394         if (len >= MIN_SMB_SIZE) {
395                 /* extract the mid for matching to pending requests */
396                 mid = SVAL(hdr, HDR_MID);
397                 wct = CVAL(hdr, HDR_WCT);
398         }
399
400         /* match the incoming request against the list of pending requests */
401         for (req=transport->pending_recv; req; req=req->next) {
402                 if (req->mid == mid) break;
403         }
404
405         if (!req) {
406                 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", 
407                          mid, CVAL(hdr, HDR_COM)));
408                 goto error;
409         }
410
411         /* fill in the 'in' portion of the matching request */
412         req->in.buffer = buffer;
413         talloc_steal(req, buffer);
414         req->in.size = len;
415         req->in.allocated = req->in.size;
416
417         /* handle NBT session replies */
418         if (req->in.size >= 4 && req->in.buffer[0] != 0) {
419                 req->status = NT_STATUS_OK;
420                 goto async;
421         }
422
423         /* handle non-SMB replies */
424         if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
425                 req->state = SMBCLI_REQUEST_ERROR;
426                 goto error;
427         }
428
429         if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
430                 DEBUG(2,("bad reply size for mid %d\n", mid));
431                 req->status = NT_STATUS_UNSUCCESSFUL;
432                 req->state = SMBCLI_REQUEST_ERROR;
433                 goto error;
434         }
435
436         req->in.hdr = hdr;
437         req->in.vwv = vwv;
438         req->in.wct = wct;
439         if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
440                 req->in.data = req->in.vwv + VWV(wct) + 2;
441                 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
442                 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
443                         DEBUG(3,("bad data size for mid %d\n", mid));
444                         /* blergh - w2k3 gives a bogus data size values in some
445                            openX replies */
446                         req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
447                 }
448         }
449         req->in.ptr = req->in.data;
450         req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
451
452         if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
453                 transport->error.etype = ETYPE_DOS;
454                 transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
455                 transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
456                 req->status = dos_to_ntstatus(transport->error.e.dos.eclass, 
457                                               transport->error.e.dos.ecode);
458         } else {
459                 transport->error.etype = ETYPE_NT;
460                 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
461                 req->status = transport->error.e.nt_status;
462         }
463
464         if (!smbcli_request_check_sign_mac(req)) {
465                 transport->error.etype = ETYPE_SOCKET;
466                 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
467                 req->state = SMBCLI_REQUEST_ERROR;
468                 req->status = NT_STATUS_ACCESS_DENIED;
469                 goto error;
470         };
471
472 async:
473         /* if this request has an async handler then call that to
474            notify that the reply has been received. This might destroy
475            the request so it must happen last */
476         DLIST_REMOVE(transport->pending_recv, req);
477         req->state = SMBCLI_REQUEST_DONE;
478         if (req->async.fn) {
479                 req->async.fn(req);
480         }
481         return;
482
483 error:
484         if (req) {
485                 DLIST_REMOVE(transport->pending_recv, req);
486                 req->state = SMBCLI_REQUEST_ERROR;
487         }
488 }
489
/*
  process some pending receives

  incremental, non-blocking read of one packet.  state lives in
  transport->recv_buffer, so this can be called repeatedly from the
  read-event handler until a full packet has accumulated.
*/
static void smbcli_transport_process_recv(struct smbcli_transport *transport)
{
	/* a incoming packet goes through 2 stages - first we read the
	   4 byte header, which tells us how much more is coming. Then
	   we read the rest */
	if (transport->recv_buffer.received < NBT_HDR_SIZE) {
		ssize_t ret;
		ret = smbcli_sock_read(transport->socket, 
				    transport->recv_buffer.header + 
				    transport->recv_buffer.received,
				    NBT_HDR_SIZE - transport->recv_buffer.received);
		if (ret == -1) {
			/* NOTE(review): assumes smbcli_sock_read() returns -1
			   for both errors and EOF - confirm; a 0 return would
			   silently make no progress here */
			smbcli_transport_dead(transport);
			return;
		}

		transport->recv_buffer.received += ret;

		if (transport->recv_buffer.received == NBT_HDR_SIZE) {
			/* we've got a full header */
			transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
			/* NOTE(review): req_size already includes NBT_HDR_SIZE,
			   so this over-allocates by 4 bytes - harmless but
			   worth tidying */
			transport->recv_buffer.buffer = talloc_size(transport,
								    NBT_HDR_SIZE+transport->recv_buffer.req_size);
			if (transport->recv_buffer.buffer == NULL) {
				smbcli_transport_dead(transport);
				return;
			}
			/* the packet buffer holds the header too, so later
			   parsing sees one contiguous NBT packet */
			memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
		}
	}

	/* stage 2: read the body directly into the packet buffer */
	if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
		ssize_t ret;
		ret = smbcli_sock_read(transport->socket, 
				    transport->recv_buffer.buffer + 
				    transport->recv_buffer.received,
				    transport->recv_buffer.req_size - 
				    transport->recv_buffer.received);
		if (ret == -1) {
			smbcli_transport_dead(transport);
			return;
		}
		transport->recv_buffer.received += ret;
	}

	/* the received != 0 guard keeps a freshly zeroed recv_buffer
	   (both fields 0) from being treated as a complete packet */
	if (transport->recv_buffer.received != 0 &&
	    transport->recv_buffer.received == transport->recv_buffer.req_size) {
		smbcli_transport_finish_recv(transport);
	}
}
543
544 /*
545   process some read/write requests that are pending
546   return False if the socket is dead
547 */
548 BOOL smbcli_transport_process(struct smbcli_transport *transport)
549 {
550         smbcli_transport_process_send(transport);
551         smbcli_transport_process_recv(transport);
552         if (transport->socket->sock == NULL) {
553                 return False;
554         }
555         return True;
556 }
557
558
559
560 /*
561   put a request into the send queue
562 */
563 void smbcli_transport_send(struct smbcli_request *req)
564 {
565         /* check if the transport is dead */
566         if (req->transport->socket->sock == NULL) {
567                 req->state = SMBCLI_REQUEST_ERROR;
568                 req->status = NT_STATUS_NET_WRITE_FAULT;
569                 return;
570         }
571
572         /* put it on the outgoing socket queue */
573         req->state = SMBCLI_REQUEST_SEND;
574         DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
575
576         /* make sure we look for write events */
577         smbcli_transport_write_enable(req->transport);
578 }