r3481: split out client.h and events.h
[jelmer/samba4-debian.git] / source / libcli / raw / clitransport.c
1 /* 
2    Unix SMB/CIFS implementation.
3    SMB client transport context management functions
4    Copyright (C) Andrew Tridgell 1994-2003
5    Copyright (C) James Myers 2003 <myersjj@samba.org>
6    
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 2 of the License, or
10    (at your option) any later version.
11    
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    
17    You should have received a copy of the GNU General Public License
18    along with this program; if not, write to the Free Software
19    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22 #include "includes.h"
23 #include "libcli/raw/libcliraw.h"
24 #include "system/time.h"
25 #include "dlinklist.h"
26 #include "events.h"
27
28
29 static void smbcli_transport_process_recv(struct smbcli_transport *transport);
30 static void smbcli_transport_process_send(struct smbcli_transport *transport);
31
32 /*
33   an event has happened on the socket
34 */
35 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde, 
36                                         time_t t, uint16_t flags)
37 {
38         struct smbcli_transport *transport = fde->private;
39
40         if (flags & EVENT_FD_READ) {
41                 smbcli_transport_process_recv(transport);
42         }
43         if (flags & EVENT_FD_WRITE) {
44                 smbcli_transport_process_send(transport);
45         }
46 }
47
48 /*
49   destroy a transport
50  */
51 static int transport_destructor(void *ptr)
52 {
53         struct smbcli_transport *transport = ptr;
54
55         smbcli_transport_dead(transport);
56         event_remove_fd(transport->event.ctx, transport->event.fde);
57         event_remove_timed(transport->event.ctx, transport->event.te);
58         return 0;
59 }
60
61 /*
62   create a transport structure based on an established socket
63 */
64 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
65 {
66         struct smbcli_transport *transport;
67         struct fd_event fde;
68
69         transport = talloc_p(sock, struct smbcli_transport);
70         if (!transport) return NULL;
71
72         ZERO_STRUCTP(transport);
73
74         transport->event.ctx = event_context_init(transport);
75         if (transport->event.ctx == NULL) {
76                 talloc_free(transport);
77                 return NULL;
78         }
79
80         transport->socket = talloc_reference(transport, sock);
81         transport->negotiate.protocol = PROTOCOL_NT1;
82         transport->options.use_spnego = lp_use_spnego();
83         transport->options.max_xmit = lp_max_xmit();
84         transport->options.max_mux = lp_maxmux();
85
86         transport->negotiate.max_xmit = transport->options.max_xmit;
87         
88         smbcli_init_signing(transport);
89
90         ZERO_STRUCT(transport->called);
91
92         fde.fd = socket_get_fd(sock->sock);
93         fde.flags = EVENT_FD_READ;
94         fde.handler = smbcli_transport_event_handler;
95         fde.private = transport;
96         fde.ref_count = 1;
97
98         transport->event.fde = event_add_fd(transport->event.ctx, &fde);
99
100         talloc_set_destructor(transport, transport_destructor);
101
102         return transport;
103 }
104
105 /*
106   mark the transport as dead
107 */
108 void smbcli_transport_dead(struct smbcli_transport *transport)
109 {
110         smbcli_sock_dead(transport->socket);
111
112         /* all pending sends become errors */
113         while (transport->pending_send) {
114                 struct smbcli_request *req = transport->pending_send;
115                 req->state = SMBCLI_REQUEST_ERROR;
116                 req->status = NT_STATUS_NET_WRITE_FAULT;
117                 DLIST_REMOVE(transport->pending_send, req);
118                 if (req->async.fn) {
119                         req->async.fn(req);
120                 }
121         }
122
123         /* as do all pending receives */
124         while (transport->pending_recv) {
125                 struct smbcli_request *req = transport->pending_recv;
126                 req->state = SMBCLI_REQUEST_ERROR;
127                 req->status = NT_STATUS_NET_WRITE_FAULT;
128                 DLIST_REMOVE(transport->pending_recv, req);
129                 if (req->async.fn) {
130                         req->async.fn(req);
131                 }
132         }
133 }
134
135
136 /*
137   enable select for write on a transport
138 */
139 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
140 {
141         transport->event.fde->flags |= EVENT_FD_WRITE;
142 }
143
144 /*
145   disable select for write on a transport
146 */
147 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
148 {
149         transport->event.fde->flags &= ~EVENT_FD_WRITE;
150 }
151
152 /****************************************************************************
153 send a session request (if appropriate)
154 ****************************************************************************/
155 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
156                            struct nmb_name *calling, 
157                            struct nmb_name *called)
158 {
159         char *p;
160         int len = NBT_HDR_SIZE;
161         struct smbcli_request *req;
162
163         if (called) {
164                 transport->called = *called;
165         }
166
167         /* 445 doesn't have session request */
168         if (transport->socket->port == 445) {
169                 return True;
170         }
171
172         /* allocate output buffer */
173         req = smbcli_request_setup_nonsmb(transport, NBT_HDR_SIZE + 2*nbt_mangled_name_len());
174
175         /* put in the destination name */
176         p = req->out.buffer + NBT_HDR_SIZE;
177         name_mangle(called->name, p, called->name_type);
178         len += name_len(p);
179
180         /* and my name */
181         p = req->out.buffer+len;
182         name_mangle(calling->name, p, calling->name_type);
183         len += name_len(p);
184
185         _smb_setlen(req->out.buffer,len-4);
186         SCVAL(req->out.buffer,0,0x81);
187
188         if (!smbcli_request_send(req) ||
189             !smbcli_request_receive(req)) {
190                 smbcli_request_destroy(req);
191                 return False;
192         }
193         
194         if (CVAL(req->in.buffer,0) != 0x82) {
195                 transport->error.etype = ETYPE_NBT;
196                 transport->error.e.nbt_error = CVAL(req->in.buffer,4);
197                 smbcli_request_destroy(req);
198                 return False;
199         }
200
201         smbcli_request_destroy(req);
202         return True;
203 }
204
205
206 /****************************************************************************
207 get next mid in sequence
208 ****************************************************************************/
209 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
210 {
211         uint16_t mid;
212         struct smbcli_request *req;
213
214         mid = transport->next_mid;
215
216 again:
217         /* now check to see if this mid is being used by one of the 
218            pending requests. This is quite efficient because the list is
219            usually very short */
220
221         /* the zero mid is reserved for requests that don't have a mid */
222         if (mid == 0) mid = 1;
223
224         for (req=transport->pending_recv; req; req=req->next) {
225                 if (req->mid == mid) {
226                         mid++;
227                         goto again;
228                 }
229         }
230
231         transport->next_mid = mid+1;
232         return mid;
233 }
234
235 static void idle_handler(struct event_context *ev, 
236                          struct timed_event *te, time_t t)
237 {
238         struct smbcli_transport *transport = te->private;
239         te->next_event = t + transport->idle.period;
240         transport->idle.func(transport, transport->idle.private);
241 }
242
243 /*
244   setup the idle handler for a transport
245   the period is in seconds
246 */
247 void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
248                                 void (*idle_func)(struct smbcli_transport *, void *),
249                                 uint_t period,
250                                 void *private)
251 {
252         struct timed_event te;
253         transport->idle.func = idle_func;
254         transport->idle.private = private;
255         transport->idle.period = period;
256
257         if (transport->event.te != NULL) {
258                 event_remove_timed(transport->event.ctx, transport->event.te);
259         }
260
261         te.next_event = time(NULL) + period;
262         te.handler = idle_handler;
263         te.private = transport;
264         transport->event.te = event_add_timed(transport->event.ctx, &te);
265 }
266
267 /*
268   process some pending sends
269 */
270 static void smbcli_transport_process_send(struct smbcli_transport *transport)
271 {
272         while (transport->pending_send) {
273                 struct smbcli_request *req = transport->pending_send;
274                 ssize_t ret;
275                 ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
276                 if (ret == -1) {
277                         if (errno == EAGAIN || errno == EINTR) {
278                                 return;
279                         }
280                         smbcli_transport_dead(transport);
281                         return;
282                 }
283                 req->out.buffer += ret;
284                 req->out.size -= ret;
285                 if (req->out.size == 0) {
286                         DLIST_REMOVE(transport->pending_send, req);
287                         if (req->one_way_request) {
288                                 req->state = SMBCLI_REQUEST_DONE;
289                                 smbcli_request_destroy(req);
290                         } else {
291                                 req->state = SMBCLI_REQUEST_RECV;
292                                 DLIST_ADD(transport->pending_recv, req);
293                         }
294                 }
295         }
296
297         /* we're out of requests to send, so don't wait for write
298            events any more */
299         smbcli_transport_write_disable(transport);
300 }
301
/*
  we have a full request in our receive buffer - match it to a pending request
  and process

  The packet is matched by mid against transport->pending_recv; special
  cases (oplock breaks, readbraw replies, NBT session replies, non-SMB
  garbage) are filtered first. On success the request's 'in' fields are
  filled in and its async callback (if any) is invoked last, since the
  callback may destroy the request.
 */
static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
{
	uint8_t *buffer, *hdr, *vwv;
	int len;
	uint16_t wct=0, mid = 0;
	struct smbcli_request *req;

	/* take ownership of the assembled packet and reset the receive
	   state so the next packet can start accumulating */
	buffer = transport->recv_buffer.buffer;
	len = transport->recv_buffer.req_size;

	ZERO_STRUCT(transport->recv_buffer);

	hdr = buffer+NBT_HDR_SIZE;
	vwv = hdr + HDR_VWV;

	/* see if it could be an oplock break request */
	if (handle_oplock_break(transport, len, hdr, vwv)) {
		talloc_free(buffer);
		return;
	}

	/* at this point we need to check for a readbraw reply, as
	   these can be any length */
	if (transport->readbraw_pending) {
		transport->readbraw_pending = 0;

		/* it must match the first entry in the pending queue
		   as the client is not allowed to have outstanding
		   readbraw requests */
		req = transport->pending_recv;
		if (!req) goto error;

		/* the request takes ownership of the packet buffer */
		req->in.buffer = buffer;
		talloc_steal(req, buffer);
		req->in.size = len;
		req->in.allocated = req->in.size;
		goto async;
	}

	if (len >= MIN_SMB_SIZE) {
		/* extract the mid for matching to pending requests */
		mid = SVAL(hdr, HDR_MID);
		wct = CVAL(hdr, HDR_WCT);
	}

	/* match the incoming request against the list of pending requests */
	for (req=transport->pending_recv; req; req=req->next) {
		if (req->mid == mid) break;
	}

	if (!req) {
		/* NOTE(review): on this path the packet buffer is neither
		   freed nor stolen; it stays parented to the transport until
		   the transport itself is freed */
		DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", 
			 mid, CVAL(hdr, HDR_COM)));
		goto error;
	}

	/* fill in the 'in' portion of the matching request */
	req->in.buffer = buffer;
	talloc_steal(req, buffer);
	req->in.size = len;
	req->in.allocated = req->in.size;

	/* handle NBT session replies */
	if (req->in.size >= 4 && req->in.buffer[0] != 0) {
		req->status = NT_STATUS_OK;
		goto async;
	}

	/* handle non-SMB replies */
	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	/* the packet must at least hold the advertised word-count vector */
	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		DEBUG(2,("bad reply size for mid %d\n", mid));
		req->status = NT_STATUS_UNSUCCESSFUL;
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	req->in.hdr = hdr;
	req->in.vwv = vwv;
	req->in.wct = wct;
	if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		/* data section follows the vwv block and its 2 byte count */
		req->in.data = req->in.vwv + VWV(wct) + 2;
		req->in.data_size = SVAL(req->in.vwv, VWV(wct));
		if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
			DEBUG(3,("bad data size for mid %d\n", mid));
			/* blergh - w2k3 gives a bogus data size values in some
			   openX replies */
			req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
		}
	}
	req->in.ptr = req->in.data;
	req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

	/* decode the error fields - DOS-style or 32 bit NT status,
	   depending on what the server advertised in flags2 */
	if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
		transport->error.etype = ETYPE_DOS;
		transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
		transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
		req->status = dos_to_ntstatus(transport->error.e.dos.eclass, 
					      transport->error.e.dos.ecode);
	} else {
		transport->error.etype = ETYPE_NT;
		transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
		req->status = transport->error.e.nt_status;
	}

	/* verify the SMB signing MAC if signing is active */
	if (!smbcli_request_check_sign_mac(req)) {
		transport->error.etype = ETYPE_SOCKET;
		transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	};

async:
	/* if this request has an async handler then call that to
	   notify that the reply has been received. This might destroy
	   the request so it must happen last */
	DLIST_REMOVE(transport->pending_recv, req);
	req->state = SMBCLI_REQUEST_DONE;
	if (req->async.fn) {
		req->async.fn(req);
	}
	return;

error:
	/* req is NULL here when no pending request matched */
	if (req) {
		DLIST_REMOVE(transport->pending_recv, req);
		req->state = SMBCLI_REQUEST_ERROR;
	}
}
439
440 /*
441   process some pending receives
442 */
443 static void smbcli_transport_process_recv(struct smbcli_transport *transport)
444 {
445         /* a incoming packet goes through 2 stages - first we read the
446            4 byte header, which tells us how much more is coming. Then
447            we read the rest */
448         if (transport->recv_buffer.received < NBT_HDR_SIZE) {
449                 ssize_t ret;
450                 ret = smbcli_sock_read(transport->socket, 
451                                     transport->recv_buffer.header + 
452                                     transport->recv_buffer.received,
453                                     NBT_HDR_SIZE - transport->recv_buffer.received);
454                 if (ret == -1) {
455                         smbcli_transport_dead(transport);
456                         return;
457                 }
458
459                 transport->recv_buffer.received += ret;
460
461                 if (transport->recv_buffer.received == NBT_HDR_SIZE) {
462                         /* we've got a full header */
463                         transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
464                         transport->recv_buffer.buffer = talloc(transport,
465                                                                NBT_HDR_SIZE+transport->recv_buffer.req_size);
466                         if (transport->recv_buffer.buffer == NULL) {
467                                 smbcli_transport_dead(transport);
468                                 return;
469                         }
470                         memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
471                 }
472         }
473
474         if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
475                 ssize_t ret;
476                 ret = smbcli_sock_read(transport->socket, 
477                                     transport->recv_buffer.buffer + 
478                                     transport->recv_buffer.received,
479                                     transport->recv_buffer.req_size - 
480                                     transport->recv_buffer.received);
481                 if (ret == -1) {
482                         smbcli_transport_dead(transport);
483                         return;
484                 }
485                 transport->recv_buffer.received += ret;
486         }
487
488         if (transport->recv_buffer.received != 0 &&
489             transport->recv_buffer.received == transport->recv_buffer.req_size) {
490                 smbcli_transport_finish_recv(transport);
491         }
492 }
493
494 /*
495   process some read/write requests that are pending
496   return False if the socket is dead
497 */
498 BOOL smbcli_transport_process(struct smbcli_transport *transport)
499 {
500         smbcli_transport_process_send(transport);
501         smbcli_transport_process_recv(transport);
502         if (transport->socket->sock == NULL) {
503                 return False;
504         }
505         return True;
506 }
507
508
509
510 /*
511   put a request into the send queue
512 */
513 void smbcli_transport_send(struct smbcli_request *req)
514 {
515         /* check if the transport is dead */
516         if (req->transport->socket->sock == NULL) {
517                 req->state = SMBCLI_REQUEST_ERROR;
518                 req->status = NT_STATUS_NET_WRITE_FAULT;
519                 return;
520         }
521
522         /* put it on the outgoing socket queue */
523         req->state = SMBCLI_REQUEST_SEND;
524         DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
525
526         /* make sure we look for write events */
527         smbcli_transport_write_enable(req->transport);
528 }