r4944: every event_add_*() caller was having to call talloc_steal() to take
[jelmer/samba4-debian.git] / source / libcli / raw / clitransport.c
1 /* 
2    Unix SMB/CIFS implementation.
3    SMB client transport context management functions
4
5    Copyright (C) Andrew Tridgell 1994-2005
6    Copyright (C) James Myers 2003 <myersjj@samba.org>
7    
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 2 of the License, or
11    (at your option) any later version.
12    
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17    
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, write to the Free Software
20    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 #include "includes.h"
24 #include "libcli/raw/libcliraw.h"
25 #include "system/time.h"
26 #include "dlinklist.h"
27 #include "events.h"
28
29
30 static void smbcli_transport_process_recv(struct smbcli_transport *transport);
31 static void smbcli_transport_process_send(struct smbcli_transport *transport);
32
33 /*
34   an event has happened on the socket
35 */
36 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde, 
37                                            struct timeval t, uint16_t flags)
38 {
39         struct smbcli_transport *transport = fde->private;
40
41         if (flags & EVENT_FD_READ) {
42                 smbcli_transport_process_recv(transport);
43                 return;
44         }
45         if (flags & EVENT_FD_WRITE) {
46                 smbcli_transport_process_send(transport);
47         }
48 }
49
/*
  talloc destructor for a transport: mark it dead so pending requests
  are errored out before the memory goes away
 */
static int transport_destructor(void *ptr)
{
        smbcli_transport_dead((struct smbcli_transport *)ptr);
        return 0;
}
60
/*
  create a transport structure based on an established socket

  The transport is allocated as a talloc child of the socket and takes a
  talloc_reference on it, so the socket stays alive for the transport's
  lifetime.  Returns NULL on allocation failure.
*/
struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
{
        struct smbcli_transport *transport;

        transport = talloc_p(sock, struct smbcli_transport);
        if (!transport) return NULL;

        ZERO_STRUCTP(transport);

        /* keep the socket alive for as long as the transport exists */
        transport->socket = talloc_reference(transport, sock);
        transport->negotiate.protocol = PROTOCOL_NT1;
        /* transport options come from the loaded smb.conf parameters */
        transport->options.use_spnego = lp_use_spnego();
        transport->options.max_xmit = lp_max_xmit();
        transport->options.max_mux = lp_maxmux();

        /* until a real negotiation happens, advertise our own maximum */
        transport->negotiate.max_xmit = transport->options.max_xmit;
        
        smbcli_init_signing(transport);

        ZERO_STRUCT(transport->called);

        /* take over event handling from the socket layer - it only
           handles events up until we are connected */
        transport->socket->event.fde->handler = smbcli_transport_event_handler;
        transport->socket->event.fde->private = transport;
        transport->socket->event.fde->flags = EVENT_FD_READ;

        /* when the transport is freed, error out any pending requests */
        talloc_set_destructor(transport, transport_destructor);

        return transport;
}
95
/*
  mark the transport as dead

  Closes the underlying socket and fails every queued request (both the
  not-yet-sent and the awaiting-reply queues) with
  NT_STATUS_NET_WRITE_FAULT, invoking each request's async callback.
*/
void smbcli_transport_dead(struct smbcli_transport *transport)
{
        smbcli_sock_dead(transport->socket);

        /* all pending sends become errors */
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                /* remove from the list before the callback runs - the
                   callback may destroy the request */
                DLIST_REMOVE(transport->pending_send, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }

        /* as do all pending receives */
        while (transport->pending_recv) {
                struct smbcli_request *req = transport->pending_recv;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                DLIST_REMOVE(transport->pending_recv, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }
}
125
126
127 /*
128   enable select for write on a transport
129 */
130 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
131 {
132         transport->socket->event.fde->flags |= EVENT_FD_WRITE;
133 }
134
135 /*
136   disable select for write on a transport
137 */
138 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
139 {
140         transport->socket->event.fde->flags &= ~EVENT_FD_WRITE;
141 }
142
143 /*
144   send a session request
145 */
146 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
147                                                      struct nbt_name *calling, 
148                                                      struct nbt_name *called)
149 {
150         uint8_t *p;
151         struct smbcli_request *req;
152         DATA_BLOB calling_blob, called_blob;
153         TALLOC_CTX *tmp_ctx = talloc_new(transport);
154         NTSTATUS status;
155
156         status = nbt_name_dup(transport, called, &transport->called);
157         if (!NT_STATUS_IS_OK(status)) goto failed;
158         
159         status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
160         if (!NT_STATUS_IS_OK(status)) goto failed;
161
162         status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
163         if (!NT_STATUS_IS_OK(status)) goto failed;
164
165         /* allocate output buffer */
166         req = smbcli_request_setup_nonsmb(transport, 
167                                           NBT_HDR_SIZE + 
168                                           calling_blob.length + called_blob.length);
169         if (req == NULL) goto failed;
170
171         /* put in the destination name */
172         p = req->out.buffer + NBT_HDR_SIZE;
173         memcpy(p, called_blob.data, called_blob.length);
174         p += called_blob.length;
175
176         memcpy(p, calling_blob.data, calling_blob.length);
177         p += calling_blob.length;
178
179         _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer)-4);
180         SCVAL(req->out.buffer,0,0x81);
181
182         if (!smbcli_request_send(req)) {
183                 smbcli_request_destroy(req);
184                 goto failed;
185         }
186
187         talloc_free(tmp_ctx);
188         return req;
189
190 failed:
191         talloc_free(tmp_ctx);
192         return NULL;
193 }
194
195 /*
196   map a session request error to a NTSTATUS
197  */
198 static NTSTATUS map_session_refused_error(uint8_t error)
199 {
200         switch (error) {
201         case 0x80:
202         case 0x81:
203                 return NT_STATUS_REMOTE_NOT_LISTENING;
204         case 0x82:
205                 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
206         case 0x83:
207                 return NT_STATUS_REMOTE_RESOURCES;
208         }
209         return NT_STATUS_UNEXPECTED_IO_ERROR;
210 }
211
212
213 /*
214   finish a smbcli_transport_connect()
215 */
216 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
217 {
218         NTSTATUS status;
219
220         if (!smbcli_request_receive(req)) {
221                 smbcli_request_destroy(req);
222                 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
223         }
224
225         switch (CVAL(req->in.buffer,0)) {
226         case 0x82:
227                 status = NT_STATUS_OK;
228                 break;
229         case 0x83:
230                 status = map_session_refused_error(CVAL(req->in.buffer,4));
231                 break;
232         case 0x84:
233                 DEBUG(1,("Warning: session retarget not supported\n"));
234                 status = NT_STATUS_NOT_SUPPORTED;
235                 break;
236         default:
237                 status = NT_STATUS_UNEXPECTED_IO_ERROR;
238                 break;
239         }
240
241         smbcli_request_destroy(req);
242         return status;
243 }
244
245
246 /*
247   send a session request (if needed)
248 */
249 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
250                               struct nbt_name *calling, 
251                               struct nbt_name *called)
252 {
253         struct smbcli_request *req;
254         NTSTATUS status;
255
256         if (transport->socket->port == 445) {
257                 return True;
258         }
259
260         req = smbcli_transport_connect_send(transport, 
261                                             calling, called);
262         status = smbcli_transport_connect_recv(req);
263         return NT_STATUS_IS_OK(status);
264 }
265
266 /****************************************************************************
267 get next mid in sequence
268 ****************************************************************************/
269 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
270 {
271         uint16_t mid;
272         struct smbcli_request *req;
273
274         mid = transport->next_mid;
275
276 again:
277         /* now check to see if this mid is being used by one of the 
278            pending requests. This is quite efficient because the list is
279            usually very short */
280
281         /* the zero mid is reserved for requests that don't have a mid */
282         if (mid == 0) mid = 1;
283
284         for (req=transport->pending_recv; req; req=req->next) {
285                 if (req->mid == mid) {
286                         mid++;
287                         goto again;
288                 }
289         }
290
291         transport->next_mid = mid+1;
292         return mid;
293 }
294
/* timed event handler for the transport idle function: re-arm the timer
   one period ahead, then invoke the user-supplied idle callback */
static void idle_handler(struct event_context *ev, 
                         struct timed_event *te, struct timeval t)
{
        struct smbcli_transport *transport = te->private;
        /* advance from the previous deadline rather than from 'now', so
           the period does not drift */
        te->next_event = timeval_add(&te->next_event, 0, transport->idle.period);
        transport->idle.func(transport, transport->idle.private);
}
302
/*
  setup the idle handler for a transport
  the period is in microseconds

  Any previously installed idle timer is removed first, so calling this
  again replaces the old handler/period.
*/
void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
                                   void (*idle_func)(struct smbcli_transport *, void *),
                                   uint64_t period,
                                   void *private)
{
        /* NOTE(review): te is a stack variable handed to event_add_timed();
           this assumes the events library copies the structure rather than
           keeping the pointer - confirm against the events API */
        struct timed_event te;
        transport->idle.func = idle_func;
        transport->idle.private = private;
        transport->idle.period = period;

        if (transport->socket->event.te != NULL) {
                event_remove_timed(transport->socket->event.ctx, transport->socket->event.te);
        }

        te.next_event = timeval_current_ofs(0, period);
        te.handler = idle_handler;
        te.private = transport;
        transport->socket->event.te = event_add_timed(transport->socket->event.ctx, 
                                                      &te, transport);
}
327
/*
  process some pending sends

  Writes as much as the socket will take from the head of the
  pending_send queue.  Partial writes leave the request at the head with
  its buffer pointer/size advanced; EAGAIN/EINTR simply returns and we
  retry on the next write event.  Any other write error kills the
  transport.
*/
static void smbcli_transport_process_send(struct smbcli_transport *transport)
{
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                ssize_t ret;
                ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
                if (ret == -1) {
                        if (errno == EAGAIN || errno == EINTR) {
                                /* socket not ready - wait for the next
                                   write event */
                                return;
                        }
                        smbcli_transport_dead(transport);
                        return;
                }
                /* account for a (possibly partial) write */
                req->out.buffer += ret;
                req->out.size -= ret;
                if (req->out.size == 0) {
                        DLIST_REMOVE(transport->pending_send, req);
                        if (req->one_way_request) {
                                /* no reply expected - the request is
                                   finished as soon as it is sent */
                                req->state = SMBCLI_REQUEST_DONE;
                                smbcli_request_destroy(req);
                        } else {
                                /* move it to the queue of requests
                                   awaiting a reply */
                                req->state = SMBCLI_REQUEST_RECV;
                                DLIST_ADD(transport->pending_recv, req);
                        }
                }
        }

        /* we're out of requests to send, so don't wait for write
           events any more */
        smbcli_transport_write_disable(transport);
}
362
/*
  we have a full request in our receive buffer - match it to a pending request
  and process

  Takes ownership of transport->recv_buffer (which is zeroed so the next
  packet starts a fresh read), matches the reply by mid against
  pending_recv, fills in the request's 'in' section, maps DOS/NT error
  codes, verifies the signing MAC, and finally fires the request's async
  callback.
 */
static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
{
        uint8_t *buffer, *hdr, *vwv;
        int len;
        uint16_t wct=0, mid = 0;
        struct smbcli_request *req;

        buffer = transport->recv_buffer.buffer;
        len = transport->recv_buffer.req_size;

        /* reset the receive state so the event loop can start reading
           the next packet */
        ZERO_STRUCT(transport->recv_buffer);

        hdr = buffer+NBT_HDR_SIZE;
        vwv = hdr + HDR_VWV;

        /* see if it could be an oplock break request */
        if (handle_oplock_break(transport, len, hdr, vwv)) {
                talloc_free(buffer);
                return;
        }

        /* at this point we need to check for a readbraw reply, as
           these can be any length */
        if (transport->readbraw_pending) {
                transport->readbraw_pending = 0;

                /* it must match the first entry in the pending queue
                   as the client is not allowed to have outstanding
                   readbraw requests */
                req = transport->pending_recv;
                if (!req) goto error;

                /* hand the raw buffer to the request; no SMB header
                   parsing is done for readbraw */
                req->in.buffer = buffer;
                talloc_steal(req, buffer);
                req->in.size = len;
                req->in.allocated = req->in.size;
                goto async;
        }

        if (len >= MIN_SMB_SIZE) {
                /* extract the mid for matching to pending requests */
                mid = SVAL(hdr, HDR_MID);
                wct = CVAL(hdr, HDR_WCT);
        }

        /* match the incoming request against the list of pending requests */
        for (req=transport->pending_recv; req; req=req->next) {
                if (req->mid == mid) break;
        }

        if (!req) {
                /* NOTE(review): on this path buffer is still parented to
                   the transport and is never freed here - it lives until
                   the transport is destroyed; looks like a leak - confirm */
                DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", 
                         mid, CVAL(hdr, HDR_COM)));
                goto error;
        }

        /* fill in the 'in' portion of the matching request; the request
           now owns the buffer */
        req->in.buffer = buffer;
        talloc_steal(req, buffer);
        req->in.size = len;
        req->in.allocated = req->in.size;

        /* handle NBT session replies */
        if (req->in.size >= 4 && req->in.buffer[0] != 0) {
                req->status = NT_STATUS_OK;
                goto async;
        }

        /* handle non-SMB replies */
        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        /* the packet must be big enough for the word count it claims */
        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
                DEBUG(2,("bad reply size for mid %d\n", mid));
                req->status = NT_STATUS_UNSUCCESSFUL;
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        req->in.hdr = hdr;
        req->in.vwv = vwv;
        req->in.wct = wct;
        if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
                /* data section follows the word vector and the 2 byte
                   byte-count field */
                req->in.data = req->in.vwv + VWV(wct) + 2;
                req->in.data_size = SVAL(req->in.vwv, VWV(wct));
                if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
                        DEBUG(3,("bad data size for mid %d\n", mid));
                        /* blergh - w2k3 gives a bogus data size values in some
                           openX replies */
                        req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
                }
        }
        req->in.ptr = req->in.data;
        req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

        /* map the wire error to an NTSTATUS, recording the raw error on
           the transport for callers that want DOS-class details */
        if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
                transport->error.etype = ETYPE_DOS;
                transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
                transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
                req->status = dos_to_ntstatus(transport->error.e.dos.eclass, 
                                              transport->error.e.dos.ecode);
        } else {
                transport->error.etype = ETYPE_NT;
                transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
                req->status = transport->error.e.nt_status;
        }

        /* a bad signature is treated as an access-denied error */
        if (!smbcli_request_check_sign_mac(req)) {
                transport->error.etype = ETYPE_SOCKET;
                transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_ACCESS_DENIED;
                goto error;
        };

async:
        /* if this request has an async handler then call that to
           notify that the reply has been received. This might destroy
           the request so it must happen last */
        DLIST_REMOVE(transport->pending_recv, req);
        req->state = SMBCLI_REQUEST_DONE;
        if (req->async.fn) {
                req->async.fn(req);
        }
        return;

error:
        if (req) {
                DLIST_REMOVE(transport->pending_recv, req);
                req->state = SMBCLI_REQUEST_ERROR;
        }
}
501
/*
  process some pending receives

  Non-blocking, resumable packet reader: state lives in
  transport->recv_buffer across calls, so a short read simply returns
  and the next read event continues where we left off.
*/
static void smbcli_transport_process_recv(struct smbcli_transport *transport)
{
        /* a incoming packet goes through 2 stages - first we read the
           4 byte header, which tells us how much more is coming. Then
           we read the rest */
        if (transport->recv_buffer.received < NBT_HDR_SIZE) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket, 
                                    transport->recv_buffer.header + 
                                    transport->recv_buffer.received,
                                    NBT_HDR_SIZE - transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }

                transport->recv_buffer.received += ret;

                if (transport->recv_buffer.received == NBT_HDR_SIZE) {
                        /* we've got a full header */
                        transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
                        /* NOTE(review): req_size already includes
                           NBT_HDR_SIZE, so this over-allocates by 4
                           bytes; harmless but looks unintentional -
                           confirm */
                        transport->recv_buffer.buffer = talloc_size(transport,
                                                                    NBT_HDR_SIZE+transport->recv_buffer.req_size);
                        if (transport->recv_buffer.buffer == NULL) {
                                smbcli_transport_dead(transport);
                                return;
                        }
                        /* the header bytes become the start of the
                           assembled packet */
                        memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
                }
        }

        /* second stage: read the packet body into the buffer */
        if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket, 
                                    transport->recv_buffer.buffer + 
                                    transport->recv_buffer.received,
                                    transport->recv_buffer.req_size - 
                                    transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }
                transport->recv_buffer.received += ret;
        }

        /* dispatch the packet once it is complete */
        if (transport->recv_buffer.received != 0 &&
            transport->recv_buffer.received == transport->recv_buffer.req_size) {
                smbcli_transport_finish_recv(transport);
        }
}
555
556 /*
557   process some read/write requests that are pending
558   return False if the socket is dead
559 */
560 BOOL smbcli_transport_process(struct smbcli_transport *transport)
561 {
562         smbcli_transport_process_send(transport);
563         smbcli_transport_process_recv(transport);
564         if (transport->socket->sock == NULL) {
565                 return False;
566         }
567         return True;
568 }
569
570
571
572 /*
573   put a request into the send queue
574 */
575 void smbcli_transport_send(struct smbcli_request *req)
576 {
577         /* check if the transport is dead */
578         if (req->transport->socket->sock == NULL) {
579                 req->state = SMBCLI_REQUEST_ERROR;
580                 req->status = NT_STATUS_NET_WRITE_FAULT;
581                 return;
582         }
583
584         /* put it on the outgoing socket queue */
585         req->state = SMBCLI_REQUEST_SEND;
586         DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
587
588         /* make sure we look for write events */
589         smbcli_transport_write_enable(req->transport);
590 }