r3463: separated out some more headers (asn_1.h, messages.h, dlinklist.h and ioctl.h)
[samba.git] / source4 / libcli / raw / clitransport.c
1 /* 
2    Unix SMB/CIFS implementation.
3    SMB client transport context management functions
4    Copyright (C) Andrew Tridgell 1994-2003
5    Copyright (C) James Myers 2003 <myersjj@samba.org>
6    
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 2 of the License, or
10    (at your option) any later version.
11    
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    
17    You should have received a copy of the GNU General Public License
18    along with this program; if not, write to the Free Software
19    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22 #include "includes.h"
23 #include "libcli/raw/libcliraw.h"
24 #include "system/time.h"
25 #include "dlinklist.h"
26
27
28 static void smbcli_transport_process_recv(struct smbcli_transport *transport);
29 static void smbcli_transport_process_send(struct smbcli_transport *transport);
30
31 /*
32   an event has happened on the socket
33 */
34 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde, 
35                                         time_t t, uint16_t flags)
36 {
37         struct smbcli_transport *transport = fde->private;
38
39         if (flags & EVENT_FD_READ) {
40                 smbcli_transport_process_recv(transport);
41         }
42         if (flags & EVENT_FD_WRITE) {
43                 smbcli_transport_process_send(transport);
44         }
45 }
46
47 /*
48   destroy a transport
49  */
50 static int transport_destructor(void *ptr)
51 {
52         struct smbcli_transport *transport = ptr;
53
54         smbcli_transport_dead(transport);
55         event_remove_fd(transport->event.ctx, transport->event.fde);
56         event_remove_timed(transport->event.ctx, transport->event.te);
57         return 0;
58 }
59
/*
  create a transport structure based on an established socket.
  The transport is allocated as a talloc child of the socket and takes
  a talloc reference on it; returns NULL on allocation failure.
*/
struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
{
	struct smbcli_transport *transport;
	struct fd_event fde;

	transport = talloc_p(sock, struct smbcli_transport);
	if (!transport) return NULL;

	ZERO_STRUCTP(transport);

	/* the transport owns its own event context */
	transport->event.ctx = event_context_init(transport);
	if (transport->event.ctx == NULL) {
		talloc_free(transport);
		return NULL;
	}

	/* keep the socket alive for as long as the transport exists */
	transport->socket = talloc_reference(transport, sock);
	transport->negotiate.protocol = PROTOCOL_NT1;
	transport->options.use_spnego = lp_use_spnego();
	transport->options.max_xmit = lp_max_xmit();
	transport->options.max_mux = lp_maxmux();

	/* until negprot completes, use the configured max_xmit */
	transport->negotiate.max_xmit = transport->options.max_xmit;
	
	smbcli_init_signing(transport);

	ZERO_STRUCT(transport->called);

	/* watch the socket for incoming data; EVENT_FD_WRITE is only
	   enabled while there are queued sends */
	fde.fd = socket_get_fd(sock->sock);
	fde.flags = EVENT_FD_READ;
	fde.handler = smbcli_transport_event_handler;
	fde.private = transport;
	fde.ref_count = 1;

	transport->event.fde = event_add_fd(transport->event.ctx, &fde);

	talloc_set_destructor(transport, transport_destructor);

	return transport;
}
103
104 /*
105   mark the transport as dead
106 */
107 void smbcli_transport_dead(struct smbcli_transport *transport)
108 {
109         smbcli_sock_dead(transport->socket);
110
111         /* all pending sends become errors */
112         while (transport->pending_send) {
113                 struct smbcli_request *req = transport->pending_send;
114                 req->state = SMBCLI_REQUEST_ERROR;
115                 req->status = NT_STATUS_NET_WRITE_FAULT;
116                 DLIST_REMOVE(transport->pending_send, req);
117                 if (req->async.fn) {
118                         req->async.fn(req);
119                 }
120         }
121
122         /* as do all pending receives */
123         while (transport->pending_recv) {
124                 struct smbcli_request *req = transport->pending_recv;
125                 req->state = SMBCLI_REQUEST_ERROR;
126                 req->status = NT_STATUS_NET_WRITE_FAULT;
127                 DLIST_REMOVE(transport->pending_recv, req);
128                 if (req->async.fn) {
129                         req->async.fn(req);
130                 }
131         }
132 }
133
134
/*
  enable select for write on a transport
*/
static void smbcli_transport_write_enable(struct smbcli_transport *transport)
{
	/* ask the event loop for writability events so queued
	   requests get flushed by the event handler */
	transport->event.fde->flags |= EVENT_FD_WRITE;
}
142
/*
  disable select for write on a transport
*/
static void smbcli_transport_write_disable(struct smbcli_transport *transport)
{
	/* stop polling for writability once the send queue is empty,
	   otherwise the event loop would spin */
	transport->event.fde->flags &= ~EVENT_FD_WRITE;
}
150
151 /****************************************************************************
152 send a session request (if appropriate)
153 ****************************************************************************/
154 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
155                            struct nmb_name *calling, 
156                            struct nmb_name *called)
157 {
158         char *p;
159         int len = NBT_HDR_SIZE;
160         struct smbcli_request *req;
161
162         if (called) {
163                 transport->called = *called;
164         }
165
166         /* 445 doesn't have session request */
167         if (transport->socket->port == 445) {
168                 return True;
169         }
170
171         /* allocate output buffer */
172         req = smbcli_request_setup_nonsmb(transport, NBT_HDR_SIZE + 2*nbt_mangled_name_len());
173
174         /* put in the destination name */
175         p = req->out.buffer + NBT_HDR_SIZE;
176         name_mangle(called->name, p, called->name_type);
177         len += name_len(p);
178
179         /* and my name */
180         p = req->out.buffer+len;
181         name_mangle(calling->name, p, calling->name_type);
182         len += name_len(p);
183
184         _smb_setlen(req->out.buffer,len-4);
185         SCVAL(req->out.buffer,0,0x81);
186
187         if (!smbcli_request_send(req) ||
188             !smbcli_request_receive(req)) {
189                 smbcli_request_destroy(req);
190                 return False;
191         }
192         
193         if (CVAL(req->in.buffer,0) != 0x82) {
194                 transport->error.etype = ETYPE_NBT;
195                 transport->error.e.nbt_error = CVAL(req->in.buffer,4);
196                 smbcli_request_destroy(req);
197                 return False;
198         }
199
200         smbcli_request_destroy(req);
201         return True;
202 }
203
204
205 /****************************************************************************
206 get next mid in sequence
207 ****************************************************************************/
208 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
209 {
210         uint16_t mid;
211         struct smbcli_request *req;
212
213         mid = transport->next_mid;
214
215 again:
216         /* now check to see if this mid is being used by one of the 
217            pending requests. This is quite efficient because the list is
218            usually very short */
219
220         /* the zero mid is reserved for requests that don't have a mid */
221         if (mid == 0) mid = 1;
222
223         for (req=transport->pending_recv; req; req=req->next) {
224                 if (req->mid == mid) {
225                         mid++;
226                         goto again;
227                 }
228         }
229
230         transport->next_mid = mid+1;
231         return mid;
232 }
233
/*
  timed event callback for the transport idle function: re-arms the
  timer one period past the current event time, then invokes the
  registered idle function
*/
static void idle_handler(struct event_context *ev, 
			 struct timed_event *te, time_t t)
{
	struct smbcli_transport *transport = te->private;
	/* reschedule before calling out, relative to the event time */
	te->next_event = t + transport->idle.period;
	transport->idle.func(transport, transport->idle.private);
}
241
/*
  setup the idle handler for a transport
  the period is in seconds. Replaces any previously installed idle
  timer on this transport.
*/
void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
				void (*idle_func)(struct smbcli_transport *, void *),
				uint_t period,
				void *private)
{
	struct timed_event te;
	/* record the new settings before touching the timer so the
	   handler always sees a consistent func/private/period */
	transport->idle.func = idle_func;
	transport->idle.private = private;
	transport->idle.period = period;

	/* drop any existing idle timer first */
	if (transport->event.te != NULL) {
		event_remove_timed(transport->event.ctx, transport->event.te);
	}

	/* first firing is one period from now; idle_handler re-arms itself */
	te.next_event = time(NULL) + period;
	te.handler = idle_handler;
	te.private = transport;
	transport->event.te = event_add_timed(transport->event.ctx, &te);
}
265
/*
  process some pending sends. Writes as much of the send queue as the
  socket will accept, handling partial writes; on a hard write error
  the whole transport is marked dead.
*/
static void smbcli_transport_process_send(struct smbcli_transport *transport)
{
	while (transport->pending_send) {
		struct smbcli_request *req = transport->pending_send;
		ssize_t ret;
		ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
		if (ret == -1) {
			/* transient errors leave the request queued for
			   the next write event */
			if (errno == EAGAIN || errno == EINTR) {
				return;
			}
			smbcli_transport_dead(transport);
			return;
		}
		/* allow for partial writes: advance past what was sent */
		req->out.buffer += ret;
		req->out.size -= ret;
		if (req->out.size == 0) {
			DLIST_REMOVE(transport->pending_send, req);
			if (req->one_way_request) {
				/* no reply expected - the request is complete */
				req->state = SMBCLI_REQUEST_DONE;
				smbcli_request_destroy(req);
			} else {
				/* move to the queue awaiting a matching reply */
				req->state = SMBCLI_REQUEST_RECV;
				DLIST_ADD(transport->pending_recv, req);
			}
		}
	}

	/* we're out of requests to send, so don't wait for write
	   events any more */
	smbcli_transport_write_disable(transport);
}
300
/*
  we have a full request in our receive buffer - match it to a pending request
  and process. Handles oplock breaks, readbraw replies, NBT session
  replies and normal SMB replies; on a match the packet buffer's talloc
  ownership is transferred to the request.
 */
static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
{
	uint8_t *buffer, *hdr, *vwv;
	int len;
	uint16_t wct=0, mid = 0;
	struct smbcli_request *req;

	/* take ownership of the assembled packet and reset the
	   receive state so the next packet starts from scratch */
	buffer = transport->recv_buffer.buffer;
	len = transport->recv_buffer.req_size;

	ZERO_STRUCT(transport->recv_buffer);

	hdr = buffer+NBT_HDR_SIZE;
	vwv = hdr + HDR_VWV;

	/* see if it could be an oplock break request */
	if (handle_oplock_break(transport, len, hdr, vwv)) {
		talloc_free(buffer);
		return;
	}

	/* at this point we need to check for a readbraw reply, as
	   these can be any length */
	if (transport->readbraw_pending) {
		transport->readbraw_pending = 0;

		/* it must match the first entry in the pending queue
		   as the client is not allowed to have outstanding
		   readbraw requests */
		req = transport->pending_recv;
		if (!req) goto error;

		req->in.buffer = buffer;
		talloc_steal(req, buffer);
		req->in.size = len;
		req->in.allocated = req->in.size;
		goto async;
	}

	if (len >= MIN_SMB_SIZE) {
		/* extract the mid for matching to pending requests */
		mid = SVAL(hdr, HDR_MID);
		wct = CVAL(hdr, HDR_WCT);
	}

	/* match the incoming request against the list of pending requests */
	for (req=transport->pending_recv; req; req=req->next) {
		if (req->mid == mid) break;
	}

	if (!req) {
		DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", 
			 mid, CVAL(hdr, HDR_COM)));
		goto error;
	}

	/* fill in the 'in' portion of the matching request; the
	   request now owns the packet buffer */
	req->in.buffer = buffer;
	talloc_steal(req, buffer);
	req->in.size = len;
	req->in.allocated = req->in.size;

	/* handle NBT session replies (first byte non-zero means this
	   is not a normal session message) */
	if (req->in.size >= 4 && req->in.buffer[0] != 0) {
		req->status = NT_STATUS_OK;
		goto async;
	}

	/* handle non-SMB replies */
	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	/* the packet must be large enough for the advertised word count */
	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		DEBUG(2,("bad reply size for mid %d\n", mid));
		req->status = NT_STATUS_UNSUCCESSFUL;
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	req->in.hdr = hdr;
	req->in.vwv = vwv;
	req->in.wct = wct;
	if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		/* data section follows the words and the 2 byte byte-count */
		req->in.data = req->in.vwv + VWV(wct) + 2;
		req->in.data_size = SVAL(req->in.vwv, VWV(wct));
		if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
			DEBUG(3,("bad data size for mid %d\n", mid));
			/* blergh - w2k3 gives a bogus data size values in some
			   openX replies */
			req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
		}
	}
	req->in.ptr = req->in.data;
	req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

	/* decode the error fields - DOS class/code or 32 bit NT status
	   depending on flags2 */
	if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
		transport->error.etype = ETYPE_DOS;
		transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
		transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
		req->status = dos_to_ntstatus(transport->error.e.dos.eclass, 
					      transport->error.e.dos.ecode);
	} else {
		transport->error.etype = ETYPE_NT;
		transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
		req->status = transport->error.e.nt_status;
	}

	if (!smbcli_request_check_sign_mac(req)) {
		transport->error.etype = ETYPE_SOCKET;
		transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	};

async:
	/* if this request has an async handler then call that to
	   notify that the reply has been received. This might destroy
	   the request so it must happen last */
	DLIST_REMOVE(transport->pending_recv, req);
	req->state = SMBCLI_REQUEST_DONE;
	if (req->async.fn) {
		req->async.fn(req);
	}
	return;

error:
	/* NOTE(review): on the unmatched-reply path req is NULL and
	   buffer is not freed here, so the packet stays parented to
	   the transport until it is destroyed - confirm intentional */
	if (req) {
		DLIST_REMOVE(transport->pending_recv, req);
		req->state = SMBCLI_REQUEST_ERROR;
	}
}
438
439 /*
440   process some pending receives
441 */
442 static void smbcli_transport_process_recv(struct smbcli_transport *transport)
443 {
444         /* a incoming packet goes through 2 stages - first we read the
445            4 byte header, which tells us how much more is coming. Then
446            we read the rest */
447         if (transport->recv_buffer.received < NBT_HDR_SIZE) {
448                 ssize_t ret;
449                 ret = smbcli_sock_read(transport->socket, 
450                                     transport->recv_buffer.header + 
451                                     transport->recv_buffer.received,
452                                     NBT_HDR_SIZE - transport->recv_buffer.received);
453                 if (ret == -1) {
454                         smbcli_transport_dead(transport);
455                         return;
456                 }
457
458                 transport->recv_buffer.received += ret;
459
460                 if (transport->recv_buffer.received == NBT_HDR_SIZE) {
461                         /* we've got a full header */
462                         transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
463                         transport->recv_buffer.buffer = talloc(transport,
464                                                                NBT_HDR_SIZE+transport->recv_buffer.req_size);
465                         if (transport->recv_buffer.buffer == NULL) {
466                                 smbcli_transport_dead(transport);
467                                 return;
468                         }
469                         memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
470                 }
471         }
472
473         if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
474                 ssize_t ret;
475                 ret = smbcli_sock_read(transport->socket, 
476                                     transport->recv_buffer.buffer + 
477                                     transport->recv_buffer.received,
478                                     transport->recv_buffer.req_size - 
479                                     transport->recv_buffer.received);
480                 if (ret == -1) {
481                         smbcli_transport_dead(transport);
482                         return;
483                 }
484                 transport->recv_buffer.received += ret;
485         }
486
487         if (transport->recv_buffer.received != 0 &&
488             transport->recv_buffer.received == transport->recv_buffer.req_size) {
489                 smbcli_transport_finish_recv(transport);
490         }
491 }
492
493 /*
494   process some read/write requests that are pending
495   return False if the socket is dead
496 */
497 BOOL smbcli_transport_process(struct smbcli_transport *transport)
498 {
499         smbcli_transport_process_send(transport);
500         smbcli_transport_process_recv(transport);
501         if (transport->socket->sock == NULL) {
502                 return False;
503         }
504         return True;
505 }
506
507
508
509 /*
510   put a request into the send queue
511 */
512 void smbcli_transport_send(struct smbcli_request *req)
513 {
514         /* check if the transport is dead */
515         if (req->transport->socket->sock == NULL) {
516                 req->state = SMBCLI_REQUEST_ERROR;
517                 req->status = NT_STATUS_NET_WRITE_FAULT;
518                 return;
519         }
520
521         /* put it on the outgoing socket queue */
522         req->state = SMBCLI_REQUEST_SEND;
523         DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
524
525         /* make sure we look for write events */
526         smbcli_transport_write_enable(req->transport);
527 }