r1896: stricter check on packet parsing for NBT session replies
[ira/wip.git] / source4 / libcli / raw / clitransport.c
1 /* 
2    Unix SMB/CIFS implementation.
3    SMB client transport context management functions
4    Copyright (C) Andrew Tridgell 1994-2003
5    Copyright (C) James Myers 2003 <myersjj@samba.org>
6    
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 2 of the License, or
10    (at your option) any later version.
11    
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    
17    You should have received a copy of the GNU General Public License
18    along with this program; if not, write to the Free Software
19    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22 #include "includes.h"
23
24 /*
25   an event has happened on the socket
26 */
27 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde, 
28                                         time_t t, uint16_t flags)
29 {
30         struct smbcli_transport *transport = fde->private;
31
32         smbcli_transport_process(transport);
33 }
34
35 /*
36   create a transport structure based on an established socket
37 */
38 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
39 {
40         TALLOC_CTX *mem_ctx;
41         struct smbcli_transport *transport;
42         struct fd_event fde;
43
44         mem_ctx = talloc_init("smbcli_transport");
45         if (!mem_ctx) return NULL;
46
47         transport = talloc_zero(mem_ctx, sizeof(*transport));
48         if (!transport) return NULL;
49
50         transport->event.ctx = event_context_init();
51         if (transport->event.ctx == NULL) {
52                 talloc_destroy(mem_ctx);
53                 return NULL;
54         }
55
56         transport->mem_ctx = mem_ctx;
57         transport->socket = sock;
58         transport->negotiate.protocol = PROTOCOL_NT1;
59         transport->options.use_spnego = lp_use_spnego();
60         transport->negotiate.max_xmit = ~0;
61         
62         smbcli_init_signing(transport);
63
64         transport->socket->reference_count++;
65
66         ZERO_STRUCT(transport->called);
67
68         fde.fd = sock->fd;
69         fde.flags = EVENT_FD_READ;
70         fde.handler = smbcli_transport_event_handler;
71         fde.private = transport;
72         fde.ref_count = 1;
73
74         transport->event.fde = event_add_fd(transport->event.ctx, &fde);
75
76         return transport;
77 }
78
79 /*
80   decrease reference count on a transport, and destroy if it becomes
81   zero
82 */
83 void smbcli_transport_close(struct smbcli_transport *transport)
84 {
85         transport->reference_count--;
86         if (transport->reference_count <= 0) {
87                 smbcli_sock_close(transport->socket);
88                 event_remove_fd(transport->event.ctx, transport->event.fde);
89                 event_remove_timed(transport->event.ctx, transport->event.te);
90                 event_context_destroy(transport->event.ctx);
91                 talloc_destroy(transport->mem_ctx);
92         }
93 }
94
95 /*
96   mark the transport as dead
97 */
98 void smbcli_transport_dead(struct smbcli_transport *transport)
99 {
100         smbcli_sock_dead(transport->socket);
101
102         /* all pending sends become errors */
103         while (transport->pending_send) {
104                 struct smbcli_request *req = transport->pending_send;
105                 req->state = SMBCLI_REQUEST_ERROR;
106                 req->status = NT_STATUS_NET_WRITE_FAULT;
107                 DLIST_REMOVE(transport->pending_send, req);
108                 if (req->async.fn) {
109                         req->async.fn(req);
110                 }
111         }
112
113         /* as do all pending receives */
114         while (transport->pending_recv) {
115                 struct smbcli_request *req = transport->pending_recv;
116                 req->state = SMBCLI_REQUEST_ERROR;
117                 req->status = NT_STATUS_NET_WRITE_FAULT;
118                 DLIST_REMOVE(transport->pending_recv, req);
119                 if (req->async.fn) {
120                         req->async.fn(req);
121                 }
122         }
123 }
124
125
126 /*
127   enable select for write on a transport
128 */
129 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
130 {
131         transport->event.fde->flags |= EVENT_FD_WRITE;
132 }
133
134 /*
135   disable select for write on a transport
136 */
137 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
138 {
139         transport->event.fde->flags &= ~EVENT_FD_WRITE;
140 }
141
142 /****************************************************************************
143 send a session request (if appropriate)
144 ****************************************************************************/
145 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
146                            struct nmb_name *calling, 
147                            struct nmb_name *called)
148 {
149         char *p;
150         int len = NBT_HDR_SIZE;
151         struct smbcli_request *req;
152
153         if (called) {
154                 transport->called = *called;
155         }
156
157         /* 445 doesn't have session request */
158         if (transport->socket->port == 445) {
159                 return True;
160         }
161
162         /* allocate output buffer */
163         req = smbcli_request_setup_nonsmb(transport, NBT_HDR_SIZE + 2*nbt_mangled_name_len());
164
165         /* put in the destination name */
166         p = req->out.buffer + NBT_HDR_SIZE;
167         name_mangle(called->name, p, called->name_type);
168         len += name_len(p);
169
170         /* and my name */
171         p = req->out.buffer+len;
172         name_mangle(calling->name, p, calling->name_type);
173         len += name_len(p);
174
175         _smb_setlen(req->out.buffer,len-4);
176         SCVAL(req->out.buffer,0,0x81);
177
178         if (!smbcli_request_send(req) ||
179             !smbcli_request_receive(req)) {
180                 smbcli_request_destroy(req);
181                 return False;
182         }
183         
184         if (CVAL(req->in.buffer,0) != 0x82) {
185                 transport->error.etype = ETYPE_NBT;
186                 transport->error.e.nbt_error = CVAL(req->in.buffer,4);
187                 smbcli_request_destroy(req);
188                 return False;
189         }
190
191         smbcli_request_destroy(req);
192         return True;
193 }
194
195
196 /****************************************************************************
197 get next mid in sequence
198 ****************************************************************************/
199 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
200 {
201         uint16_t mid;
202         struct smbcli_request *req;
203
204         mid = transport->next_mid;
205
206 again:
207         /* now check to see if this mid is being used by one of the 
208            pending requests. This is quite efficient because the list is
209            usually very short */
210
211         /* the zero mid is reserved for requests that don't have a mid */
212         if (mid == 0) mid = 1;
213
214         for (req=transport->pending_recv; req; req=req->next) {
215                 if (req->mid == mid) {
216                         mid++;
217                         goto again;
218                 }
219         }
220
221         transport->next_mid = mid+1;
222         return mid;
223 }
224
225 static void idle_handler(struct event_context *ev, 
226                          struct timed_event *te, time_t t)
227 {
228         struct smbcli_transport *transport = te->private;
229         te->next_event = t + transport->idle.period;
230         transport->idle.func(transport, transport->idle.private);
231 }
232
233 /*
234   setup the idle handler for a transport
235   the period is in seconds
236 */
237 void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
238                                 void (*idle_func)(struct smbcli_transport *, void *),
239                                 uint_t period,
240                                 void *private)
241 {
242         struct timed_event te;
243         transport->idle.func = idle_func;
244         transport->idle.private = private;
245         transport->idle.period = period;
246
247         if (transport->event.te != NULL) {
248                 event_remove_timed(transport->event.ctx, transport->event.te);
249         }
250
251         te.next_event = time(NULL) + period;
252         te.handler = idle_handler;
253         te.private = transport;
254         transport->event.te = event_add_timed(transport->event.ctx, &te);
255 }
256
257 /*
258   process some pending sends
259 */
260 static void smbcli_transport_process_send(struct smbcli_transport *transport)
261 {
262         while (transport->pending_send) {
263                 struct smbcli_request *req = transport->pending_send;
264                 ssize_t ret;
265                 ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
266                 if (ret == -1) {
267                         if (errno == EAGAIN || errno == EINTR) {
268                                 return;
269                         }
270                         smbcli_transport_dead(transport);
271                 }
272                 req->out.buffer += ret;
273                 req->out.size -= ret;
274                 if (req->out.size == 0) {
275                         DLIST_REMOVE(transport->pending_send, req);
276                         if (req->one_way_request) {
277                                 req->state = SMBCLI_REQUEST_DONE;
278                                 smbcli_request_destroy(req);
279                         } else {
280                                 req->state = SMBCLI_REQUEST_RECV;
281                                 DLIST_ADD(transport->pending_recv, req);
282                         }
283                 }
284         }
285
286         /* we're out of requests to send, so don't wait for write
287            events any more */
288         smbcli_transport_write_disable(transport);
289 }
290
/*
  we have a full request in our receive buffer - match it to a pending request
  and process
 */
static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
{
	uint8_t *buffer, *hdr, *vwv;
	int len;
	uint16_t wct, mid = 0;
	struct smbcli_request *req;

	/* take ownership of the assembled packet and reset the
	   receive state so the next packet can start arriving */
	buffer = transport->recv_buffer.buffer;
	len = transport->recv_buffer.req_size;

	ZERO_STRUCT(transport->recv_buffer);

	/* the SMB header follows the 4-byte NBT header; vwv is the
	   word-vector area within the SMB header */
	hdr = buffer+NBT_HDR_SIZE;
	vwv = hdr + HDR_VWV;

	/* see if it could be an oplock break request */
	if (handle_oplock_break(transport, len, hdr, vwv)) {
		/* consumed as an oplock break - free the packet buffer */
		talloc_free(transport->mem_ctx, buffer);
		return;
	}

	/* at this point we need to check for a readbraw reply, as
	   these can be any length */
	if (transport->readbraw_pending) {
		transport->readbraw_pending = 0;

		/* it must match the first entry in the pending queue
		   as the client is not allowed to have outstanding
		   readbraw requests */
		req = transport->pending_recv;
		if (!req) goto error;

		/* hand the raw buffer to the request; readbraw
		   replies have no SMB header to parse */
		req->in.buffer = buffer;
		talloc_steal(transport->mem_ctx, req->mem_ctx, buffer);
		req->in.size = len;
		req->in.allocated = req->in.size;
		goto async;
	}

	if (len >= MIN_SMB_SIZE) {
		/* extract the mid for matching to pending requests */
		mid = SVAL(hdr, HDR_MID);
		wct = CVAL(hdr, HDR_WCT);
	}
	/* NOTE(review): if len < MIN_SMB_SIZE, mid stays 0 and wct is
	   uninitialized - the wct-dependent checks below are only
	   reached after the stricter size check on req->in.size, so
	   wct is set on every path that reads it */

	/* match the incoming request against the list of pending requests */
	for (req=transport->pending_recv; req; req=req->next) {
		if (req->mid == mid) break;
	}

	if (!req) {
		DEBUG(1,("Discarding unmatched reply with mid %d\n", mid));
		goto error;
	}

	/* fill in the 'in' portion of the matching request */
	req->in.buffer = buffer;
	talloc_steal(transport->mem_ctx, req->mem_ctx, buffer);
	req->in.size = len;
	req->in.allocated = req->in.size;

	/* handle NBT session replies */
	/* a non-zero first byte is a NBT session-level message
	   (session messages carrying SMB have type 0) - complete the
	   request without SMB header parsing */
	if (req->in.size >= 4 && req->in.buffer[0] != 0) {
		req->status = NT_STATUS_OK;
		goto async;
	}

	/* handle non-SMB replies */
	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	/* the packet must be big enough to hold the advertised word count */
	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		DEBUG(2,("bad reply size for mid %d\n", mid));
		req->status = NT_STATUS_UNSUCCESSFUL;
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	req->in.hdr = hdr;
	req->in.vwv = vwv;
	req->in.wct = wct;
	if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		/* the data section follows the word vector and its
		   2-byte byte-count field */
		req->in.data = req->in.vwv + VWV(wct) + 2;
		req->in.data_size = SVAL(req->in.vwv, VWV(wct));
		if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
			DEBUG(3,("bad data size for mid %d\n", mid));
			/* blergh - w2k3 gives a bogus data size values in some
			   openX replies */
			req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
		}
	}
	req->in.ptr = req->in.data;
	req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

	/* decode the status field - either DOS class/code or a 32-bit
	   NT status, depending on FLAGS2 */
	if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
		transport->error.etype = ETYPE_DOS;
		transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
		transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
		req->status = dos_to_ntstatus(transport->error.e.dos.eclass, 
					      transport->error.e.dos.ecode);
	} else {
		transport->error.etype = ETYPE_NT;
		transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
		req->status = transport->error.e.nt_status;
	}

	/* verify the SMB signing MAC if signing is active */
	if (!smbcli_request_check_sign_mac(req)) {
		transport->error.etype = ETYPE_SOCKET;
		transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	};

async:
	/* if this request has an async handler then call that to
	   notify that the reply has been received. This might destroy
	   the request so it must happen last */
	DLIST_REMOVE(transport->pending_recv, req);
	req->state = SMBCLI_REQUEST_DONE;
	if (req->async.fn) {
		req->async.fn(req);
	}
	return;

error:
	if (req) {
		DLIST_REMOVE(transport->pending_recv, req);
		req->state = SMBCLI_REQUEST_ERROR;
	}
}
427
428 /*
429   process some pending receives
430 */
431 static void smbcli_transport_process_recv(struct smbcli_transport *transport)
432 {
433         /* a incoming packet goes through 2 stages - first we read the
434            4 byte header, which tells us how much more is coming. Then
435            we read the rest */
436         if (transport->recv_buffer.received < NBT_HDR_SIZE) {
437                 ssize_t ret;
438                 ret = smbcli_sock_read(transport->socket, 
439                                     transport->recv_buffer.header + 
440                                     transport->recv_buffer.received,
441                                     NBT_HDR_SIZE - transport->recv_buffer.received);
442                 if (ret == 0) {
443                         smbcli_transport_dead(transport);
444                         return;
445                 }
446                 if (ret == -1) {
447                         if (errno == EINTR || errno == EAGAIN) {
448                                 return;
449                         }
450                         smbcli_transport_dead(transport);
451                         return;
452                 }
453
454                 transport->recv_buffer.received += ret;
455
456                 if (transport->recv_buffer.received == NBT_HDR_SIZE) {
457                         /* we've got a full header */
458                         transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
459                         transport->recv_buffer.buffer = talloc(transport->mem_ctx,
460                                                                NBT_HDR_SIZE+transport->recv_buffer.req_size);
461                         if (transport->recv_buffer.buffer == NULL) {
462                                 smbcli_transport_dead(transport);
463                                 return;
464                         }
465                         memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
466                 }
467         }
468
469         if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
470                 ssize_t ret;
471                 ret = smbcli_sock_read(transport->socket, 
472                                     transport->recv_buffer.buffer + 
473                                     transport->recv_buffer.received,
474                                     transport->recv_buffer.req_size - 
475                                     transport->recv_buffer.received);
476                 if (ret == -1) {
477                         if (errno == EINTR || errno == EAGAIN) {
478                                 return;
479                         }
480                         smbcli_transport_dead(transport);
481                         return;
482                 }
483                 transport->recv_buffer.received += ret;
484         }
485
486         if (transport->recv_buffer.received != 0 &&
487             transport->recv_buffer.received == transport->recv_buffer.req_size) {
488                 smbcli_transport_finish_recv(transport);
489         }
490 }
491
492 /*
493   process some read/write requests that are pending
494   return False if the socket is dead
495 */
496 BOOL smbcli_transport_process(struct smbcli_transport *transport)
497 {
498         smbcli_transport_process_send(transport);
499         smbcli_transport_process_recv(transport);
500         if (transport->socket->fd == -1) {
501                 return False;
502         }
503         return True;
504 }
505
506
507
508 /*
509   put a request into the send queue
510 */
511 void smbcli_transport_send(struct smbcli_request *req)
512 {
513         /* check if the transport is dead */
514         if (req->transport->socket->fd == -1) {
515                 req->state = SMBCLI_REQUEST_ERROR;
516                 req->status = NT_STATUS_NET_WRITE_FAULT;
517                 return;
518         }
519
520         /* put it on the outgoing socket queue */
521         req->state = SMBCLI_REQUEST_SEND;
522         DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
523
524         /* make sure we look for write events */
525         smbcli_transport_write_enable(req->transport);
526 }