2d29ba371f1169c010f0a64ce7670591d87a0cd1
[jelmer/samba4-debian.git] / source / libcli / raw / clitransport.c
1 /* 
2    Unix SMB/CIFS implementation.
3    SMB client transport context management functions
4    Copyright (C) Andrew Tridgell 1994-2003
5    Copyright (C) James Myers 2003 <myersjj@samba.org>
6    
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 2 of the License, or
10    (at your option) any later version.
11    
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    
17    You should have received a copy of the GNU General Public License
18    along with this program; if not, write to the Free Software
19    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22 #include "includes.h"
23
24 /*
25   an event has happened on the socket
26 */
27 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde, 
28                                         time_t t, uint16_t flags)
29 {
30         struct smbcli_transport *transport = fde->private;
31
32         smbcli_transport_process(transport);
33 }
34
35 /*
36   create a transport structure based on an established socket
37 */
38 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
39 {
40         struct smbcli_transport *transport;
41         struct fd_event fde;
42
43         transport = talloc_named(NULL, sizeof(*transport), "smbcli_transport");
44         if (!transport) return NULL;
45
46         ZERO_STRUCTP(transport);
47
48         transport->event.ctx = event_context_init();
49         if (transport->event.ctx == NULL) {
50                 talloc_free(transport);
51                 return NULL;
52         }
53
54         transport->socket = sock;
55         transport->negotiate.protocol = PROTOCOL_NT1;
56         transport->options.use_spnego = lp_use_spnego();
57         transport->negotiate.max_xmit = ~0;
58         
59         smbcli_init_signing(transport);
60
61         transport->socket->reference_count++;
62
63         ZERO_STRUCT(transport->called);
64
65         fde.fd = sock->fd;
66         fde.flags = EVENT_FD_READ;
67         fde.handler = smbcli_transport_event_handler;
68         fde.private = transport;
69         fde.ref_count = 1;
70
71         transport->event.fde = event_add_fd(transport->event.ctx, &fde);
72
73         return transport;
74 }
75
76 /*
77   decrease reference count on a transport, and destroy if it becomes
78   zero
79 */
80 void smbcli_transport_close(struct smbcli_transport *transport)
81 {
82         transport->reference_count--;
83         if (transport->reference_count <= 0) {
84                 smbcli_sock_close(transport->socket);
85                 event_remove_fd(transport->event.ctx, transport->event.fde);
86                 event_remove_timed(transport->event.ctx, transport->event.te);
87                 event_context_destroy(transport->event.ctx);
88                 talloc_free(transport);
89         }
90 }
91
92 /*
93   mark the transport as dead
94 */
95 void smbcli_transport_dead(struct smbcli_transport *transport)
96 {
97         smbcli_sock_dead(transport->socket);
98
99         /* all pending sends become errors */
100         while (transport->pending_send) {
101                 struct smbcli_request *req = transport->pending_send;
102                 req->state = SMBCLI_REQUEST_ERROR;
103                 req->status = NT_STATUS_NET_WRITE_FAULT;
104                 DLIST_REMOVE(transport->pending_send, req);
105                 if (req->async.fn) {
106                         req->async.fn(req);
107                 }
108         }
109
110         /* as do all pending receives */
111         while (transport->pending_recv) {
112                 struct smbcli_request *req = transport->pending_recv;
113                 req->state = SMBCLI_REQUEST_ERROR;
114                 req->status = NT_STATUS_NET_WRITE_FAULT;
115                 DLIST_REMOVE(transport->pending_recv, req);
116                 if (req->async.fn) {
117                         req->async.fn(req);
118                 }
119         }
120 }
121
122
123 /*
124   enable select for write on a transport
125 */
126 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
127 {
128         transport->event.fde->flags |= EVENT_FD_WRITE;
129 }
130
131 /*
132   disable select for write on a transport
133 */
134 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
135 {
136         transport->event.fde->flags &= ~EVENT_FD_WRITE;
137 }
138
139 /****************************************************************************
140 send a session request (if appropriate)
141 ****************************************************************************/
142 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
143                            struct nmb_name *calling, 
144                            struct nmb_name *called)
145 {
146         char *p;
147         int len = NBT_HDR_SIZE;
148         struct smbcli_request *req;
149
150         if (called) {
151                 transport->called = *called;
152         }
153
154         /* 445 doesn't have session request */
155         if (transport->socket->port == 445) {
156                 return True;
157         }
158
159         /* allocate output buffer */
160         req = smbcli_request_setup_nonsmb(transport, NBT_HDR_SIZE + 2*nbt_mangled_name_len());
161
162         /* put in the destination name */
163         p = req->out.buffer + NBT_HDR_SIZE;
164         name_mangle(called->name, p, called->name_type);
165         len += name_len(p);
166
167         /* and my name */
168         p = req->out.buffer+len;
169         name_mangle(calling->name, p, calling->name_type);
170         len += name_len(p);
171
172         _smb_setlen(req->out.buffer,len-4);
173         SCVAL(req->out.buffer,0,0x81);
174
175         if (!smbcli_request_send(req) ||
176             !smbcli_request_receive(req)) {
177                 smbcli_request_destroy(req);
178                 return False;
179         }
180         
181         if (CVAL(req->in.buffer,0) != 0x82) {
182                 transport->error.etype = ETYPE_NBT;
183                 transport->error.e.nbt_error = CVAL(req->in.buffer,4);
184                 smbcli_request_destroy(req);
185                 return False;
186         }
187
188         smbcli_request_destroy(req);
189         return True;
190 }
191
192
193 /****************************************************************************
194 get next mid in sequence
195 ****************************************************************************/
196 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
197 {
198         uint16_t mid;
199         struct smbcli_request *req;
200
201         mid = transport->next_mid;
202
203 again:
204         /* now check to see if this mid is being used by one of the 
205            pending requests. This is quite efficient because the list is
206            usually very short */
207
208         /* the zero mid is reserved for requests that don't have a mid */
209         if (mid == 0) mid = 1;
210
211         for (req=transport->pending_recv; req; req=req->next) {
212                 if (req->mid == mid) {
213                         mid++;
214                         goto again;
215                 }
216         }
217
218         transport->next_mid = mid+1;
219         return mid;
220 }
221
222 static void idle_handler(struct event_context *ev, 
223                          struct timed_event *te, time_t t)
224 {
225         struct smbcli_transport *transport = te->private;
226         te->next_event = t + transport->idle.period;
227         transport->idle.func(transport, transport->idle.private);
228 }
229
230 /*
231   setup the idle handler for a transport
232   the period is in seconds
233 */
234 void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
235                                 void (*idle_func)(struct smbcli_transport *, void *),
236                                 uint_t period,
237                                 void *private)
238 {
239         struct timed_event te;
240         transport->idle.func = idle_func;
241         transport->idle.private = private;
242         transport->idle.period = period;
243
244         if (transport->event.te != NULL) {
245                 event_remove_timed(transport->event.ctx, transport->event.te);
246         }
247
248         te.next_event = time(NULL) + period;
249         te.handler = idle_handler;
250         te.private = transport;
251         transport->event.te = event_add_timed(transport->event.ctx, &te);
252 }
253
254 /*
255   process some pending sends
256 */
257 static void smbcli_transport_process_send(struct smbcli_transport *transport)
258 {
259         while (transport->pending_send) {
260                 struct smbcli_request *req = transport->pending_send;
261                 ssize_t ret;
262                 ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
263                 if (ret == -1) {
264                         if (errno == EAGAIN || errno == EINTR) {
265                                 return;
266                         }
267                         smbcli_transport_dead(transport);
268                 }
269                 req->out.buffer += ret;
270                 req->out.size -= ret;
271                 if (req->out.size == 0) {
272                         DLIST_REMOVE(transport->pending_send, req);
273                         if (req->one_way_request) {
274                                 req->state = SMBCLI_REQUEST_DONE;
275                                 smbcli_request_destroy(req);
276                         } else {
277                                 req->state = SMBCLI_REQUEST_RECV;
278                                 DLIST_ADD(transport->pending_recv, req);
279                         }
280                 }
281         }
282
283         /* we're out of requests to send, so don't wait for write
284            events any more */
285         smbcli_transport_write_disable(transport);
286 }
287
288 /*
289   we have a full request in our receive buffer - match it to a pending request
290   and process
291  */
292 static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
293 {
294         uint8_t *buffer, *hdr, *vwv;
295         int len;
296         uint16_t wct=0, mid = 0;
297         struct smbcli_request *req;
298
299         buffer = transport->recv_buffer.buffer;
300         len = transport->recv_buffer.req_size;
301
302         ZERO_STRUCT(transport->recv_buffer);
303
304         hdr = buffer+NBT_HDR_SIZE;
305         vwv = hdr + HDR_VWV;
306
307         /* see if it could be an oplock break request */
308         if (handle_oplock_break(transport, len, hdr, vwv)) {
309                 talloc_free(buffer);
310                 return;
311         }
312
313         /* at this point we need to check for a readbraw reply, as
314            these can be any length */
315         if (transport->readbraw_pending) {
316                 transport->readbraw_pending = 0;
317
318                 /* it must match the first entry in the pending queue
319                    as the client is not allowed to have outstanding
320                    readbraw requests */
321                 req = transport->pending_recv;
322                 if (!req) goto error;
323
324                 req->in.buffer = buffer;
325                 talloc_steal(req, buffer);
326                 req->in.size = len;
327                 req->in.allocated = req->in.size;
328                 goto async;
329         }
330
331         if (len >= MIN_SMB_SIZE) {
332                 /* extract the mid for matching to pending requests */
333                 mid = SVAL(hdr, HDR_MID);
334                 wct = CVAL(hdr, HDR_WCT);
335         }
336
337         /* match the incoming request against the list of pending requests */
338         for (req=transport->pending_recv; req; req=req->next) {
339                 if (req->mid == mid) break;
340         }
341
342         if (!req) {
343                 DEBUG(1,("Discarding unmatched reply with mid %d\n", mid));
344                 goto error;
345         }
346
347         /* fill in the 'in' portion of the matching request */
348         req->in.buffer = buffer;
349         talloc_steal(req, buffer);
350         req->in.size = len;
351         req->in.allocated = req->in.size;
352
353         /* handle NBT session replies */
354         if (req->in.size >= 4 && req->in.buffer[0] != 0) {
355                 req->status = NT_STATUS_OK;
356                 goto async;
357         }
358
359         /* handle non-SMB replies */
360         if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
361                 req->state = SMBCLI_REQUEST_ERROR;
362                 goto error;
363         }
364
365         if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
366                 DEBUG(2,("bad reply size for mid %d\n", mid));
367                 req->status = NT_STATUS_UNSUCCESSFUL;
368                 req->state = SMBCLI_REQUEST_ERROR;
369                 goto error;
370         }
371
372         req->in.hdr = hdr;
373         req->in.vwv = vwv;
374         req->in.wct = wct;
375         if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
376                 req->in.data = req->in.vwv + VWV(wct) + 2;
377                 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
378                 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
379                         DEBUG(3,("bad data size for mid %d\n", mid));
380                         /* blergh - w2k3 gives a bogus data size values in some
381                            openX replies */
382                         req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
383                 }
384         }
385         req->in.ptr = req->in.data;
386         req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
387
388         if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
389                 transport->error.etype = ETYPE_DOS;
390                 transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
391                 transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
392                 req->status = dos_to_ntstatus(transport->error.e.dos.eclass, 
393                                               transport->error.e.dos.ecode);
394         } else {
395                 transport->error.etype = ETYPE_NT;
396                 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
397                 req->status = transport->error.e.nt_status;
398         }
399
400         if (!smbcli_request_check_sign_mac(req)) {
401                 transport->error.etype = ETYPE_SOCKET;
402                 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
403                 req->state = SMBCLI_REQUEST_ERROR;
404                 goto error;
405         };
406
407 async:
408         /* if this request has an async handler then call that to
409            notify that the reply has been received. This might destroy
410            the request so it must happen last */
411         DLIST_REMOVE(transport->pending_recv, req);
412         req->state = SMBCLI_REQUEST_DONE;
413         if (req->async.fn) {
414                 req->async.fn(req);
415         }
416         return;
417
418 error:
419         if (req) {
420                 DLIST_REMOVE(transport->pending_recv, req);
421                 req->state = SMBCLI_REQUEST_ERROR;
422         }
423 }
424
425 /*
426   process some pending receives
427 */
428 static void smbcli_transport_process_recv(struct smbcli_transport *transport)
429 {
430         /* a incoming packet goes through 2 stages - first we read the
431            4 byte header, which tells us how much more is coming. Then
432            we read the rest */
433         if (transport->recv_buffer.received < NBT_HDR_SIZE) {
434                 ssize_t ret;
435                 ret = smbcli_sock_read(transport->socket, 
436                                     transport->recv_buffer.header + 
437                                     transport->recv_buffer.received,
438                                     NBT_HDR_SIZE - transport->recv_buffer.received);
439                 if (ret == 0) {
440                         smbcli_transport_dead(transport);
441                         return;
442                 }
443                 if (ret == -1) {
444                         if (errno == EINTR || errno == EAGAIN) {
445                                 return;
446                         }
447                         smbcli_transport_dead(transport);
448                         return;
449                 }
450
451                 transport->recv_buffer.received += ret;
452
453                 if (transport->recv_buffer.received == NBT_HDR_SIZE) {
454                         /* we've got a full header */
455                         transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
456                         transport->recv_buffer.buffer = talloc(transport,
457                                                                NBT_HDR_SIZE+transport->recv_buffer.req_size);
458                         if (transport->recv_buffer.buffer == NULL) {
459                                 smbcli_transport_dead(transport);
460                                 return;
461                         }
462                         memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
463                 }
464         }
465
466         if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
467                 ssize_t ret;
468                 ret = smbcli_sock_read(transport->socket, 
469                                     transport->recv_buffer.buffer + 
470                                     transport->recv_buffer.received,
471                                     transport->recv_buffer.req_size - 
472                                     transport->recv_buffer.received);
473                 if (ret == -1) {
474                         if (errno == EINTR || errno == EAGAIN) {
475                                 return;
476                         }
477                         smbcli_transport_dead(transport);
478                         return;
479                 }
480                 transport->recv_buffer.received += ret;
481         }
482
483         if (transport->recv_buffer.received != 0 &&
484             transport->recv_buffer.received == transport->recv_buffer.req_size) {
485                 smbcli_transport_finish_recv(transport);
486         }
487 }
488
489 /*
490   process some read/write requests that are pending
491   return False if the socket is dead
492 */
493 BOOL smbcli_transport_process(struct smbcli_transport *transport)
494 {
495         smbcli_transport_process_send(transport);
496         smbcli_transport_process_recv(transport);
497         if (transport->socket->fd == -1) {
498                 return False;
499         }
500         return True;
501 }
502
503
504
505 /*
506   put a request into the send queue
507 */
508 void smbcli_transport_send(struct smbcli_request *req)
509 {
510         /* check if the transport is dead */
511         if (req->transport->socket->fd == -1) {
512                 req->state = SMBCLI_REQUEST_ERROR;
513                 req->status = NT_STATUS_NET_WRITE_FAULT;
514                 return;
515         }
516
517         /* put it on the outgoing socket queue */
518         req->state = SMBCLI_REQUEST_SEND;
519         DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
520
521         /* make sure we look for write events */
522         smbcli_transport_write_enable(req->transport);
523 }