r3319: fixed a bug in the client library found by the new non-block testing code
source4/libcli/raw/clitransport.c
/*
   Unix SMB/CIFS implementation.
   SMB client transport context management functions
   Copyright (C) Andrew Tridgell 1994-2003
   Copyright (C) James Myers 2003 <myersjj@samba.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "includes.h"

static void smbcli_transport_process_recv(struct smbcli_transport *transport);
static void smbcli_transport_process_send(struct smbcli_transport *transport);

/*
  an event has happened on the socket
*/
static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde,
                                           time_t t, uint16_t flags)
{
        struct smbcli_transport *transport = fde->private;

        if (flags & EVENT_FD_READ) {
                smbcli_transport_process_recv(transport);
        }
        if (flags & EVENT_FD_WRITE) {
                smbcli_transport_process_send(transport);
        }
}

/*
  destroy a transport
 */
static int transport_destructor(void *ptr)
{
        struct smbcli_transport *transport = ptr;

        smbcli_transport_dead(transport);
        event_remove_fd(transport->event.ctx, transport->event.fde);
        event_remove_timed(transport->event.ctx, transport->event.te);
        return 0;
}

/*
  create a transport structure based on an established socket
*/
struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
{
        struct smbcli_transport *transport;
        struct fd_event fde;

        transport = talloc_p(sock, struct smbcli_transport);
        if (!transport) return NULL;

        ZERO_STRUCTP(transport);

        transport->event.ctx = event_context_init(transport);
        if (transport->event.ctx == NULL) {
                talloc_free(transport);
                return NULL;
        }

        transport->socket = talloc_reference(transport, sock);
        transport->negotiate.protocol = PROTOCOL_NT1;
        transport->options.use_spnego = lp_use_spnego();
        transport->negotiate.max_xmit = ~0;

        smbcli_init_signing(transport);

        ZERO_STRUCT(transport->called);

        fde.fd = socket_get_fd(sock->sock);
        fde.flags = EVENT_FD_READ;
        fde.handler = smbcli_transport_event_handler;
        fde.private = transport;
        fde.ref_count = 1;

        transport->event.fde = event_add_fd(transport->event.ctx, &fde);

        talloc_set_destructor(transport, transport_destructor);

        return transport;
}
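
/*
  Illustrative usage sketch, not part of the original file: creating a
  transport on top of an smbcli_socket that has already been connected
  elsewhere. example_setup_transport and the pre-connected sock are
  made up for the example; only smbcli_transport_init() from this file
  is relied on.
*/
#if 0
static struct smbcli_transport *example_setup_transport(struct smbcli_socket *sock)
{
        /* sock is assumed to be already connected by the caller */
        struct smbcli_transport *transport = smbcli_transport_init(sock);
        if (transport == NULL) {
                /* allocation or event context setup failed */
                return NULL;
        }
        /* the transport keeps a talloc reference to sock, and its
           destructor marks it dead and removes its events */
        return transport;
}
#endif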

/*
  mark the transport as dead
*/
void smbcli_transport_dead(struct smbcli_transport *transport)
{
        smbcli_sock_dead(transport->socket);

        /* all pending sends become errors */
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                DLIST_REMOVE(transport->pending_send, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }

        /* as do all pending receives */
        while (transport->pending_recv) {
                struct smbcli_request *req = transport->pending_recv;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                DLIST_REMOVE(transport->pending_recv, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }
}


/*
  enable select for write on a transport
*/
static void smbcli_transport_write_enable(struct smbcli_transport *transport)
{
        transport->event.fde->flags |= EVENT_FD_WRITE;
}

/*
  disable select for write on a transport
*/
static void smbcli_transport_write_disable(struct smbcli_transport *transport)
{
        transport->event.fde->flags &= ~EVENT_FD_WRITE;
}

/****************************************************************************
 send a session request (if appropriate)
****************************************************************************/
BOOL smbcli_transport_connect(struct smbcli_transport *transport,
                              struct nmb_name *calling,
                              struct nmb_name *called)
{
        char *p;
        int len = NBT_HDR_SIZE;
        struct smbcli_request *req;

        if (called) {
                transport->called = *called;
        }

        /* 445 doesn't have session request */
        if (transport->socket->port == 445) {
                return True;
        }

        /* allocate output buffer */
        req = smbcli_request_setup_nonsmb(transport, NBT_HDR_SIZE + 2*nbt_mangled_name_len());
        if (req == NULL) {
                return False;
        }

        /* put in the destination name */
        p = req->out.buffer + NBT_HDR_SIZE;
        name_mangle(called->name, p, called->name_type);
        len += name_len(p);

        /* and my name */
        p = req->out.buffer+len;
        name_mangle(calling->name, p, calling->name_type);
        len += name_len(p);

        _smb_setlen(req->out.buffer,len-4);
        SCVAL(req->out.buffer,0,0x81);

        if (!smbcli_request_send(req) ||
            !smbcli_request_receive(req)) {
                smbcli_request_destroy(req);
                return False;
        }

        if (CVAL(req->in.buffer,0) != 0x82) {
                transport->error.etype = ETYPE_NBT;
                transport->error.e.nbt_error = CVAL(req->in.buffer,4);
                smbcli_request_destroy(req);
                return False;
        }

        smbcli_request_destroy(req);
        return True;
}
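
/*
  Illustrative usage sketch, not part of the original file: how the NBT
  session request fits into connection setup. example_session_request,
  the "client"/server name strings and the make_nmb_name() helper are
  assumptions for the example; the point is only the flow through
  smbcli_transport_connect().
*/
#if 0
static BOOL example_session_request(struct smbcli_transport *transport,
                                    const char *server)
{
        struct nmb_name calling, called;

        make_nmb_name(&calling, "client", 0x00);
        make_nmb_name(&called, server, 0x20);

        /* a no-op on port 445; on port 139 this sends the session request
           and waits for the 0x82 positive session response */
        return smbcli_transport_connect(transport, &calling, &called);
}
#endif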


/****************************************************************************
 get next mid in sequence
****************************************************************************/
uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
{
        uint16_t mid;
        struct smbcli_request *req;

        mid = transport->next_mid;

again:
        /* now check to see if this mid is being used by one of the
           pending requests. This is quite efficient because the list is
           usually very short */

        /* the zero mid is reserved for requests that don't have a mid */
        if (mid == 0) mid = 1;

        for (req=transport->pending_recv; req; req=req->next) {
                if (req->mid == mid) {
                        mid++;
                        goto again;
                }
        }

        transport->next_mid = mid+1;
        return mid;
}

/*
  handler for the transport's timed event: reschedule the event and call
  the registered idle function
*/
static void idle_handler(struct event_context *ev,
                         struct timed_event *te, time_t t)
{
        struct smbcli_transport *transport = te->private;
        te->next_event = t + transport->idle.period;
        transport->idle.func(transport, transport->idle.private);
}

/*
  set up the idle handler for a transport
  the period is in seconds
*/
void smbcli_transport_idle_handler(struct smbcli_transport *transport,
                                   void (*idle_func)(struct smbcli_transport *, void *),
                                   uint_t period,
                                   void *private)
{
        struct timed_event te;
        transport->idle.func = idle_func;
        transport->idle.private = private;
        transport->idle.period = period;

        if (transport->event.te != NULL) {
                event_remove_timed(transport->event.ctx, transport->event.te);
        }

        te.next_event = time(NULL) + period;
        te.handler = idle_handler;
        te.private = transport;
        transport->event.te = event_add_timed(transport->event.ctx, &te);
}
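
/*
  Illustrative usage sketch, not part of the original file: registering a
  periodic callback on the transport. example_keepalive,
  example_register_keepalive and the 30 second period are made up; the
  callback signature is the one smbcli_transport_idle_handler() expects.
*/
#if 0
static void example_keepalive(struct smbcli_transport *transport, void *private)
{
        /* called every idle.period seconds from the transport's event context */
        DEBUG(5,("transport idle tick\n"));
}

static void example_register_keepalive(struct smbcli_transport *transport)
{
        /* fire example_keepalive every 30 seconds */
        smbcli_transport_idle_handler(transport, example_keepalive, 30, NULL);
}
#endif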

/*
  process some pending sends
*/
static void smbcli_transport_process_send(struct smbcli_transport *transport)
{
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                ssize_t ret;
                ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
                if (ret == -1) {
                        if (errno == EAGAIN || errno == EINTR) {
                                return;
                        }
                        smbcli_transport_dead(transport);
                        return;
                }
                req->out.buffer += ret;
                req->out.size -= ret;
                if (req->out.size == 0) {
                        DLIST_REMOVE(transport->pending_send, req);
                        if (req->one_way_request) {
                                req->state = SMBCLI_REQUEST_DONE;
                                smbcli_request_destroy(req);
                        } else {
                                req->state = SMBCLI_REQUEST_RECV;
                                DLIST_ADD(transport->pending_recv, req);
                        }
                }
        }

        /* we're out of requests to send, so don't wait for write
           events any more */
        smbcli_transport_write_disable(transport);
}

/*
  we have a full request in our receive buffer - match it to a pending request
  and process
 */
static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
{
        uint8_t *buffer, *hdr, *vwv;
        int len;
        uint16_t wct=0, mid = 0;
        struct smbcli_request *req;

        buffer = transport->recv_buffer.buffer;
        len = transport->recv_buffer.req_size;

        ZERO_STRUCT(transport->recv_buffer);

        hdr = buffer+NBT_HDR_SIZE;
        vwv = hdr + HDR_VWV;

        /* see if it could be an oplock break request */
        if (handle_oplock_break(transport, len, hdr, vwv)) {
                talloc_free(buffer);
                return;
        }

        /* at this point we need to check for a readbraw reply, as
           these can be any length */
        if (transport->readbraw_pending) {
                transport->readbraw_pending = 0;

                /* it must match the first entry in the pending queue
                   as the client is not allowed to have outstanding
                   readbraw requests */
                req = transport->pending_recv;
                if (!req) goto error;

                req->in.buffer = buffer;
                talloc_steal(req, buffer);
                req->in.size = len;
                req->in.allocated = req->in.size;
                goto async;
        }

        if (len >= MIN_SMB_SIZE) {
                /* extract the mid for matching to pending requests */
                mid = SVAL(hdr, HDR_MID);
                wct = CVAL(hdr, HDR_WCT);
        }

        /* match the incoming request against the list of pending requests */
        for (req=transport->pending_recv; req; req=req->next) {
                if (req->mid == mid) break;
        }

        if (!req) {
                DEBUG(1,("Discarding unmatched reply with mid %d\n", mid));
                goto error;
        }

        /* fill in the 'in' portion of the matching request */
        req->in.buffer = buffer;
        talloc_steal(req, buffer);
        req->in.size = len;
        req->in.allocated = req->in.size;

        /* handle NBT session replies */
        if (req->in.size >= 4 && req->in.buffer[0] != 0) {
                req->status = NT_STATUS_OK;
                goto async;
        }

        /* handle non-SMB replies */
        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
                DEBUG(2,("bad reply size for mid %d\n", mid));
                req->status = NT_STATUS_UNSUCCESSFUL;
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        req->in.hdr = hdr;
        req->in.vwv = vwv;
        req->in.wct = wct;
        if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
                req->in.data = req->in.vwv + VWV(wct) + 2;
                req->in.data_size = SVAL(req->in.vwv, VWV(wct));
                if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
                        DEBUG(3,("bad data size for mid %d\n", mid));
                        /* blergh - w2k3 gives bogus data size values in some
                           openX replies */
                        req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
                }
        }
        req->in.ptr = req->in.data;
        req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

        if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
                transport->error.etype = ETYPE_DOS;
                transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
                transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
                req->status = dos_to_ntstatus(transport->error.e.dos.eclass,
                                              transport->error.e.dos.ecode);
        } else {
                transport->error.etype = ETYPE_NT;
                transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
                req->status = transport->error.e.nt_status;
        }

        if (!smbcli_request_check_sign_mac(req)) {
                transport->error.etype = ETYPE_SOCKET;
                transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

async:
        /* if this request has an async handler then call that to
           notify that the reply has been received. This might destroy
           the request so it must happen last */
        DLIST_REMOVE(transport->pending_recv, req);
        req->state = SMBCLI_REQUEST_DONE;
        if (req->async.fn) {
                req->async.fn(req);
        }
        return;

error:
        if (req) {
                DLIST_REMOVE(transport->pending_recv, req);
                req->state = SMBCLI_REQUEST_ERROR;
        }
}

/*
  process some pending receives
*/
static void smbcli_transport_process_recv(struct smbcli_transport *transport)
{
        /* an incoming packet goes through two stages - first we read the
           4 byte header, which tells us how much more is coming. Then
           we read the rest */
        if (transport->recv_buffer.received < NBT_HDR_SIZE) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket,
                                       transport->recv_buffer.header +
                                       transport->recv_buffer.received,
                                       NBT_HDR_SIZE - transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }

                transport->recv_buffer.received += ret;

                if (transport->recv_buffer.received == NBT_HDR_SIZE) {
                        /* we've got a full header */
                        transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
                        transport->recv_buffer.buffer = talloc(transport,
                                                               NBT_HDR_SIZE+transport->recv_buffer.req_size);
                        if (transport->recv_buffer.buffer == NULL) {
                                smbcli_transport_dead(transport);
                                return;
                        }
                        memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
                }
        }

        if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket,
                                       transport->recv_buffer.buffer +
                                       transport->recv_buffer.received,
                                       transport->recv_buffer.req_size -
                                       transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }
                transport->recv_buffer.received += ret;
        }

        if (transport->recv_buffer.received != 0 &&
            transport->recv_buffer.received == transport->recv_buffer.req_size) {
                smbcli_transport_finish_recv(transport);
        }
}

/*
  process some read/write requests that are pending
  return False if the socket is dead
*/
BOOL smbcli_transport_process(struct smbcli_transport *transport)
{
        smbcli_transport_process_send(transport);
        smbcli_transport_process_recv(transport);
        if (transport->socket->sock == NULL) {
                return False;
        }
        return True;
}
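
/*
  Illustrative usage sketch, not part of the original file: a caller with
  its own select() loop pumping the transport until a particular request
  completes. example_wait_for_reply is made up and deliberately simplified;
  the library itself normally drives this through transport->event.ctx
  rather than a private select() loop.
*/
#if 0
static BOOL example_wait_for_reply(struct smbcli_request *req)
{
        struct smbcli_transport *transport = req->transport;

        while (req->state == SMBCLI_REQUEST_SEND ||
               req->state == SMBCLI_REQUEST_RECV) {
                fd_set r_fds;
                int fd = socket_get_fd(transport->socket->sock);

                /* wait for the socket to become readable before pumping */
                FD_ZERO(&r_fds);
                FD_SET(fd, &r_fds);
                if (select(fd+1, &r_fds, NULL, NULL, NULL) == -1) {
                        return False;
                }
                if (!smbcli_transport_process(transport)) {
                        /* the socket died; pending requests were flipped to
                           SMBCLI_REQUEST_ERROR by smbcli_transport_dead() */
                        return False;
                }
        }
        return req->state == SMBCLI_REQUEST_DONE;
}
#endif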



/*
  put a request into the send queue
*/
void smbcli_transport_send(struct smbcli_request *req)
{
        /* check if the transport is dead */
        if (req->transport->socket->sock == NULL) {
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                return;
        }

        /* put it on the outgoing socket queue */
        req->state = SMBCLI_REQUEST_SEND;
        DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);

        /* make sure we look for write events */
        smbcli_transport_write_enable(req->transport);
}
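
/*
  Illustrative usage sketch, not part of the original file: queueing a
  request with an async completion callback. example_reply_handler and
  example_send_async are made-up names; req->async.fn is the hook this
  file invokes from smbcli_transport_finish_recv() and from the error
  paths in smbcli_transport_dead(), and it may destroy the request, so
  the caller must not touch req after the callback has run.
*/
#if 0
static void example_reply_handler(struct smbcli_request *req)
{
        if (req->state == SMBCLI_REQUEST_ERROR) {
                DEBUG(1,("request failed: %s\n", nt_errstr(req->status)));
        }
        /* parse the reply here, then free it */
        smbcli_request_destroy(req);
}

static void example_send_async(struct smbcli_request *req)
{
        /* the callback fires once the reply is matched, or on socket failure */
        req->async.fn = example_reply_handler;
        smbcli_transport_send(req);
}
#endif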