/* 
   Unix SMB/CIFS implementation.
   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "system/time.h"
#include "dlinklist.h"
#include "events.h"


static void smbcli_transport_process_recv(struct smbcli_transport *transport);
static void smbcli_transport_process_send(struct smbcli_transport *transport);

/*
  an event has happened on the socket
*/
static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde,
                                           struct timeval t, uint16_t flags)
{
        struct smbcli_transport *transport = fde->private;

        if (flags & EVENT_FD_READ) {
                smbcli_transport_process_recv(transport);
                return;
        }
        if (flags & EVENT_FD_WRITE) {
                smbcli_transport_process_send(transport);
        }
}

/*
  destroy a transport
 */
static int transport_destructor(void *ptr)
{
        struct smbcli_transport *transport = ptr;

        smbcli_transport_dead(transport);
        return 0;
}

/*
  create a transport structure based on an established socket
*/
struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
                                               TALLOC_CTX *parent_ctx, BOOL primary)
{
        struct smbcli_transport *transport;

        transport = talloc_zero(parent_ctx, struct smbcli_transport);
        if (!transport) return NULL;

        if (primary) {
                transport->socket = talloc_steal(transport, sock);
        } else {
                transport->socket = talloc_reference(transport, sock);
        }
        transport->negotiate.protocol = PROTOCOL_NT1;
        transport->options.use_spnego = lp_use_spnego();
        transport->options.max_xmit = lp_max_xmit();
        transport->options.max_mux = lp_maxmux();

        transport->negotiate.max_xmit = transport->options.max_xmit;

        smbcli_init_signing(transport);

        ZERO_STRUCT(transport->called);

        /* take over event handling from the socket layer - it only
           handles events up until we are connected */
        transport->socket->event.fde->handler = smbcli_transport_event_handler;
        transport->socket->event.fde->private = transport;
        transport->socket->event.fde->flags = EVENT_FD_READ;

        talloc_set_destructor(transport, transport_destructor);

        return transport;
}

/*
  mark the transport as dead
*/
void smbcli_transport_dead(struct smbcli_transport *transport)
{
        smbcli_sock_dead(transport->socket);

        /* all pending sends become errors */
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                DLIST_REMOVE(transport->pending_send, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }

        /* as do all pending receives */
        while (transport->pending_recv) {
                struct smbcli_request *req = transport->pending_recv;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                DLIST_REMOVE(transport->pending_recv, req);
                if (req->async.fn) {
                        req->async.fn(req);
                }
        }
}


/*
  enable select for write on a transport
*/
static void smbcli_transport_write_enable(struct smbcli_transport *transport)
{
        transport->socket->event.fde->flags |= EVENT_FD_WRITE;
}

/*
  disable select for write on a transport
*/
static void smbcli_transport_write_disable(struct smbcli_transport *transport)
{
        transport->socket->event.fde->flags &= ~EVENT_FD_WRITE;
}

/*
  send a session request
*/
struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
                                                     struct nbt_name *calling,
                                                     struct nbt_name *called)
{
        uint8_t *p;
        struct smbcli_request *req;
        DATA_BLOB calling_blob, called_blob;
        TALLOC_CTX *tmp_ctx = talloc_new(transport);
        NTSTATUS status;

        status = nbt_name_dup(transport, called, &transport->called);
        if (!NT_STATUS_IS_OK(status)) goto failed;

        status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
        if (!NT_STATUS_IS_OK(status)) goto failed;

        status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
        if (!NT_STATUS_IS_OK(status)) goto failed;

        /* allocate output buffer */
        req = smbcli_request_setup_nonsmb(transport,
                                          NBT_HDR_SIZE +
                                          calling_blob.length + called_blob.length);
        if (req == NULL) goto failed;

        /* put in the destination name */
        p = req->out.buffer + NBT_HDR_SIZE;
        memcpy(p, called_blob.data, called_blob.length);
        p += called_blob.length;

        memcpy(p, calling_blob.data, calling_blob.length);
        p += calling_blob.length;

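        /* fill in the NBT session service header: 0x81 is the session
           request packet type (RFC 1002), and the length field excludes
           the 4 byte NBT header itself */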
        _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer)-4);
        SCVAL(req->out.buffer,0,0x81);

        if (!smbcli_request_send(req)) {
                smbcli_request_destroy(req);
                goto failed;
        }

        talloc_free(tmp_ctx);
        return req;

failed:
        talloc_free(tmp_ctx);
        return NULL;
}

/*
  map a session request error to a NTSTATUS
 */
static NTSTATUS map_session_refused_error(uint8_t error)
{
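        /* these are the NBT negative session response error codes from
           RFC 1002: 0x80 not listening on called name, 0x81 not listening
           for calling name, 0x82 called name not present, 0x83 called
           name present but insufficient resources */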
        switch (error) {
        case 0x80:
        case 0x81:
                return NT_STATUS_REMOTE_NOT_LISTENING;
        case 0x82:
                return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
        case 0x83:
                return NT_STATUS_REMOTE_RESOURCES;
        }
        return NT_STATUS_UNEXPECTED_IO_ERROR;
}


/*
  finish a smbcli_transport_connect()
*/
NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
{
        NTSTATUS status;

        if (!smbcli_request_receive(req)) {
                smbcli_request_destroy(req);
                return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
        }

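        /* the first byte of the reply is the NBT session packet type:
           0x82 positive session response, 0x83 negative session response,
           0x84 session retarget response */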
        switch (CVAL(req->in.buffer,0)) {
        case 0x82:
                status = NT_STATUS_OK;
                break;
        case 0x83:
                status = map_session_refused_error(CVAL(req->in.buffer,4));
                break;
        case 0x84:
                DEBUG(1,("Warning: session retarget not supported\n"));
                status = NT_STATUS_NOT_SUPPORTED;
                break;
        default:
                status = NT_STATUS_UNEXPECTED_IO_ERROR;
                break;
        }

        smbcli_request_destroy(req);
        return status;
}


/*
  send a session request (if needed)
*/
BOOL smbcli_transport_connect(struct smbcli_transport *transport,
                              struct nbt_name *calling,
                              struct nbt_name *called)
{
        struct smbcli_request *req;
        NTSTATUS status;

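        /* on port 445 SMB runs directly over TCP, so no NetBIOS session
           request is needed */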
        if (transport->socket->port == 445) {
                return True;
        }

        req = smbcli_transport_connect_send(transport,
                                            calling, called);
        status = smbcli_transport_connect_recv(req);
        return NT_STATUS_IS_OK(status);
}

/****************************************************************************
get next mid in sequence
****************************************************************************/
uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
{
        uint16_t mid;
        struct smbcli_request *req;

        mid = transport->next_mid;

again:
        /* now check to see if this mid is being used by one of the
           pending requests. This is quite efficient because the list is
           usually very short */

        /* the zero mid is reserved for requests that don't have a mid */
        if (mid == 0) mid = 1;

        for (req=transport->pending_recv; req; req=req->next) {
                if (req->mid == mid) {
                        mid++;
                        goto again;
                }
        }

        transport->next_mid = mid+1;
        return mid;
}

static void idle_handler(struct event_context *ev,
                         struct timed_event *te, struct timeval t)
{
        struct smbcli_transport *transport = te->private;
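        /* re-arm the timed event so the idle function keeps being called
           every idle.period microseconds */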
        te->next_event = timeval_add(&te->next_event, 0, transport->idle.period);
        transport->idle.func(transport, transport->idle.private);
}

/*
  setup the idle handler for a transport
  the period is in microseconds
*/
void smbcli_transport_idle_handler(struct smbcli_transport *transport,
                                   void (*idle_func)(struct smbcli_transport *, void *),
                                   uint64_t period,
                                   void *private)
{
        struct timed_event te;
        transport->idle.func = idle_func;
        transport->idle.private = private;
        transport->idle.period = period;

        if (transport->socket->event.te != NULL) {
                talloc_free(transport->socket->event.te);
        }

        te.next_event = timeval_current_ofs(0, period);
        te.handler = idle_handler;
        te.private = transport;
        transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
                                                      &te, transport);
}

/*
  process some pending sends
*/
static void smbcli_transport_process_send(struct smbcli_transport *transport)
{
        while (transport->pending_send) {
                struct smbcli_request *req = transport->pending_send;
                ssize_t ret;
                ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
                if (ret == -1) {
                        if (errno == EAGAIN || errno == EINTR) {
                                return;
                        }
                        smbcli_transport_dead(transport);
                        return;
                }
                req->out.buffer += ret;
                req->out.size -= ret;
                if (req->out.size == 0) {
                        DLIST_REMOVE(transport->pending_send, req);
                        if (req->one_way_request) {
                                req->state = SMBCLI_REQUEST_DONE;
                                smbcli_request_destroy(req);
                        } else {
                                req->state = SMBCLI_REQUEST_RECV;
                                DLIST_ADD(transport->pending_recv, req);
                        }
                }
        }

        /* we're out of requests to send, so don't wait for write
           events any more */
        smbcli_transport_write_disable(transport);
}

/*
  we have a full request in our receive buffer - match it to a pending request
  and process
 */
static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
{
        uint8_t *buffer, *hdr, *vwv;
        int len;
        uint16_t wct=0, mid = 0;
        struct smbcli_request *req;

        buffer = transport->recv_buffer.buffer;
        len = transport->recv_buffer.req_size;

        ZERO_STRUCT(transport->recv_buffer);

        hdr = buffer+NBT_HDR_SIZE;
        vwv = hdr + HDR_VWV;

        /* see if it could be an oplock break request */
        if (handle_oplock_break(transport, len, hdr, vwv)) {
                talloc_free(buffer);
                return;
        }

        /* at this point we need to check for a readbraw reply, as
           these can be any length */
        if (transport->readbraw_pending) {
                transport->readbraw_pending = 0;

                /* it must match the first entry in the pending queue
                   as the client is not allowed to have outstanding
                   readbraw requests */
                req = transport->pending_recv;
                if (!req) goto error;

                req->in.buffer = buffer;
                talloc_steal(req, buffer);
                req->in.size = len;
                req->in.allocated = req->in.size;
                goto async;
        }

        if (len >= MIN_SMB_SIZE) {
                /* extract the mid for matching to pending requests */
                mid = SVAL(hdr, HDR_MID);
                wct = CVAL(hdr, HDR_WCT);
        }

        /* match the incoming request against the list of pending requests */
        for (req=transport->pending_recv; req; req=req->next) {
                if (req->mid == mid) break;
        }

        if (!req) {
                DEBUG(1,("Discarding unmatched reply with mid %d op %d\n",
                         mid, CVAL(hdr, HDR_COM)));
                goto error;
        }

        /* fill in the 'in' portion of the matching request */
        req->in.buffer = buffer;
        talloc_steal(req, buffer);
        req->in.size = len;
        req->in.allocated = req->in.size;

        /* handle NBT session replies */
        if (req->in.size >= 4 && req->in.buffer[0] != 0) {
                req->status = NT_STATUS_OK;
                goto async;
        }

        /* handle non-SMB replies */
        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
                DEBUG(2,("bad reply size for mid %d\n", mid));
                req->status = NT_STATUS_UNSUCCESSFUL;
                req->state = SMBCLI_REQUEST_ERROR;
                goto error;
        }

        req->in.hdr = hdr;
        req->in.vwv = vwv;
        req->in.wct = wct;
        if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
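                /* the data section starts after the word vector and the
                   2 byte 'byte count' field */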
                req->in.data = req->in.vwv + VWV(wct) + 2;
                req->in.data_size = SVAL(req->in.vwv, VWV(wct));
                if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
                        DEBUG(3,("bad data size for mid %d\n", mid));
                        /* blergh - w2k3 gives bogus data size values in some
                           openX replies */
                        req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
                }
        }
        req->in.ptr = req->in.data;
        req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

        if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
                transport->error.etype = ETYPE_DOS;
                transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
                transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
                req->status = dos_to_ntstatus(transport->error.e.dos.eclass,
                                              transport->error.e.dos.ecode);
        } else {
                transport->error.etype = ETYPE_NT;
                transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
                req->status = transport->error.e.nt_status;
        }

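        /* verify the SMB signing MAC on the reply; a bad signature is
           treated as an access denied error on this request */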
        if (!smbcli_request_check_sign_mac(req)) {
                transport->error.etype = ETYPE_SOCKET;
                transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_ACCESS_DENIED;
                goto error;
        }

async:
        /* if this request has an async handler then call that to
           notify that the reply has been received. This might destroy
           the request so it must happen last */
        DLIST_REMOVE(transport->pending_recv, req);
        req->state = SMBCLI_REQUEST_DONE;
        if (req->async.fn) {
                req->async.fn(req);
        }
        return;

error:
        if (req) {
                DLIST_REMOVE(transport->pending_recv, req);
                req->state = SMBCLI_REQUEST_ERROR;
        }
}

/*
  process some pending receives
*/
static void smbcli_transport_process_recv(struct smbcli_transport *transport)
{
        /* an incoming packet goes through 2 stages - first we read the
           4 byte header, which tells us how much more is coming. Then
           we read the rest */
        if (transport->recv_buffer.received < NBT_HDR_SIZE) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket,
                                    transport->recv_buffer.header +
                                    transport->recv_buffer.received,
                                    NBT_HDR_SIZE - transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }

                transport->recv_buffer.received += ret;

                if (transport->recv_buffer.received == NBT_HDR_SIZE) {
                        /* we've got a full header */
                        transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
                        transport->recv_buffer.buffer = talloc_size(transport,
                                                                    NBT_HDR_SIZE+transport->recv_buffer.req_size);
                        if (transport->recv_buffer.buffer == NULL) {
                                smbcli_transport_dead(transport);
                                return;
                        }
                        memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
                }
        }

        if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
                ssize_t ret;
                ret = smbcli_sock_read(transport->socket,
                                    transport->recv_buffer.buffer +
                                    transport->recv_buffer.received,
                                    transport->recv_buffer.req_size -
                                    transport->recv_buffer.received);
                if (ret == -1) {
                        smbcli_transport_dead(transport);
                        return;
                }
                transport->recv_buffer.received += ret;
        }

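        /* once the whole packet has arrived, match it against the
           pending requests and dispatch it */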
        if (transport->recv_buffer.received != 0 &&
            transport->recv_buffer.received == transport->recv_buffer.req_size) {
                smbcli_transport_finish_recv(transport);
        }
}

/*
  process some read/write requests that are pending
  return False if the socket is dead
*/
BOOL smbcli_transport_process(struct smbcli_transport *transport)
{
        smbcli_transport_process_send(transport);
        smbcli_transport_process_recv(transport);
        if (transport->socket->sock == NULL) {
                return False;
        }
        return True;
}



/*
  put a request into the send queue
*/
void smbcli_transport_send(struct smbcli_request *req)
{
        /* check if the transport is dead */
        if (req->transport->socket->sock == NULL) {
                req->state = SMBCLI_REQUEST_ERROR;
                req->status = NT_STATUS_NET_WRITE_FAULT;
                return;
        }

        /* put it on the outgoing socket queue */
        req->state = SMBCLI_REQUEST_SEND;
        DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);

        /* make sure we look for write events */
        smbcli_transport_write_enable(req->transport);
}