/* 
   Unix SMB/CIFS implementation.
   SMB client transport context management functions
   Copyright (C) Andrew Tridgell 1994-2003
   Copyright (C) James Myers 2003 <myersjj@samba.org>
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
25 static void smbcli_transport_process_recv(struct smbcli_transport *transport);
26 static void smbcli_transport_process_send(struct smbcli_transport *transport);
29 an event has happened on the socket
31 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde,
32 time_t t, uint16_t flags)
34 struct smbcli_transport *transport = fde->private;
36 if (flags & EVENT_FD_READ) {
37 smbcli_transport_process_recv(transport);
39 if (flags & EVENT_FD_WRITE) {
40 smbcli_transport_process_send(transport);
47 static int transport_destructor(void *ptr)
49 struct smbcli_transport *transport = ptr;
51 smbcli_transport_dead(transport);
52 event_remove_fd(transport->event.ctx, transport->event.fde);
53 event_remove_timed(transport->event.ctx, transport->event.te);
58 create a transport structure based on an established socket
60 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
62 struct smbcli_transport *transport;
65 transport = talloc_p(sock, struct smbcli_transport);
66 if (!transport) return NULL;
68 ZERO_STRUCTP(transport);
70 transport->event.ctx = event_context_init(transport);
71 if (transport->event.ctx == NULL) {
72 talloc_free(transport);
76 transport->socket = talloc_reference(transport, sock);
77 transport->negotiate.protocol = PROTOCOL_NT1;
78 transport->options.use_spnego = lp_use_spnego();
79 transport->options.max_xmit = lp_max_xmit();
80 transport->options.max_mux = lp_maxmux();
82 transport->negotiate.max_xmit = transport->options.max_xmit;
84 smbcli_init_signing(transport);
86 ZERO_STRUCT(transport->called);
88 fde.fd = socket_get_fd(sock->sock);
89 fde.flags = EVENT_FD_READ;
90 fde.handler = smbcli_transport_event_handler;
91 fde.private = transport;
94 transport->event.fde = event_add_fd(transport->event.ctx, &fde);
96 talloc_set_destructor(transport, transport_destructor);
102 mark the transport as dead
104 void smbcli_transport_dead(struct smbcli_transport *transport)
106 smbcli_sock_dead(transport->socket);
108 /* all pending sends become errors */
109 while (transport->pending_send) {
110 struct smbcli_request *req = transport->pending_send;
111 req->state = SMBCLI_REQUEST_ERROR;
112 req->status = NT_STATUS_NET_WRITE_FAULT;
113 DLIST_REMOVE(transport->pending_send, req);
119 /* as do all pending receives */
120 while (transport->pending_recv) {
121 struct smbcli_request *req = transport->pending_recv;
122 req->state = SMBCLI_REQUEST_ERROR;
123 req->status = NT_STATUS_NET_WRITE_FAULT;
124 DLIST_REMOVE(transport->pending_recv, req);
133 enable select for write on a transport
135 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
137 transport->event.fde->flags |= EVENT_FD_WRITE;
141 disable select for write on a transport
143 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
145 transport->event.fde->flags &= ~EVENT_FD_WRITE;
148 /****************************************************************************
149 send a session request (if appropriate)
150 ****************************************************************************/
151 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
152 struct nmb_name *calling,
153 struct nmb_name *called)
156 int len = NBT_HDR_SIZE;
157 struct smbcli_request *req;
160 transport->called = *called;
163 /* 445 doesn't have session request */
164 if (transport->socket->port == 445) {
168 /* allocate output buffer */
169 req = smbcli_request_setup_nonsmb(transport, NBT_HDR_SIZE + 2*nbt_mangled_name_len());
171 /* put in the destination name */
172 p = req->out.buffer + NBT_HDR_SIZE;
173 name_mangle(called->name, p, called->name_type);
177 p = req->out.buffer+len;
178 name_mangle(calling->name, p, calling->name_type);
181 _smb_setlen(req->out.buffer,len-4);
182 SCVAL(req->out.buffer,0,0x81);
184 if (!smbcli_request_send(req) ||
185 !smbcli_request_receive(req)) {
186 smbcli_request_destroy(req);
190 if (CVAL(req->in.buffer,0) != 0x82) {
191 transport->error.etype = ETYPE_NBT;
192 transport->error.e.nbt_error = CVAL(req->in.buffer,4);
193 smbcli_request_destroy(req);
197 smbcli_request_destroy(req);
202 /****************************************************************************
203 get next mid in sequence
204 ****************************************************************************/
205 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
208 struct smbcli_request *req;
210 mid = transport->next_mid;
213 /* now check to see if this mid is being used by one of the
214 pending requests. This is quite efficient because the list is
215 usually very short */
217 /* the zero mid is reserved for requests that don't have a mid */
218 if (mid == 0) mid = 1;
220 for (req=transport->pending_recv; req; req=req->next) {
221 if (req->mid == mid) {
227 transport->next_mid = mid+1;
231 static void idle_handler(struct event_context *ev,
232 struct timed_event *te, time_t t)
234 struct smbcli_transport *transport = te->private;
235 te->next_event = t + transport->idle.period;
236 transport->idle.func(transport, transport->idle.private);
240 setup the idle handler for a transport
241 the period is in seconds
243 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
244 void (*idle_func)(struct smbcli_transport *, void *),
248 struct timed_event te;
249 transport->idle.func = idle_func;
250 transport->idle.private = private;
251 transport->idle.period = period;
253 if (transport->event.te != NULL) {
254 event_remove_timed(transport->event.ctx, transport->event.te);
257 te.next_event = time(NULL) + period;
258 te.handler = idle_handler;
259 te.private = transport;
260 transport->event.te = event_add_timed(transport->event.ctx, &te);
264 process some pending sends
266 static void smbcli_transport_process_send(struct smbcli_transport *transport)
268 while (transport->pending_send) {
269 struct smbcli_request *req = transport->pending_send;
271 ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
273 if (errno == EAGAIN || errno == EINTR) {
276 smbcli_transport_dead(transport);
279 req->out.buffer += ret;
280 req->out.size -= ret;
281 if (req->out.size == 0) {
282 DLIST_REMOVE(transport->pending_send, req);
283 if (req->one_way_request) {
284 req->state = SMBCLI_REQUEST_DONE;
285 smbcli_request_destroy(req);
287 req->state = SMBCLI_REQUEST_RECV;
288 DLIST_ADD(transport->pending_recv, req);
293 /* we're out of requests to send, so don't wait for write
295 smbcli_transport_write_disable(transport);
299 we have a full request in our receive buffer - match it to a pending request
302 static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
304 uint8_t *buffer, *hdr, *vwv;
306 uint16_t wct=0, mid = 0;
307 struct smbcli_request *req;
309 buffer = transport->recv_buffer.buffer;
310 len = transport->recv_buffer.req_size;
312 ZERO_STRUCT(transport->recv_buffer);
314 hdr = buffer+NBT_HDR_SIZE;
317 /* see if it could be an oplock break request */
318 if (handle_oplock_break(transport, len, hdr, vwv)) {
323 /* at this point we need to check for a readbraw reply, as
324 these can be any length */
325 if (transport->readbraw_pending) {
326 transport->readbraw_pending = 0;
328 /* it must match the first entry in the pending queue
329 as the client is not allowed to have outstanding
331 req = transport->pending_recv;
332 if (!req) goto error;
334 req->in.buffer = buffer;
335 talloc_steal(req, buffer);
337 req->in.allocated = req->in.size;
341 if (len >= MIN_SMB_SIZE) {
342 /* extract the mid for matching to pending requests */
343 mid = SVAL(hdr, HDR_MID);
344 wct = CVAL(hdr, HDR_WCT);
347 /* match the incoming request against the list of pending requests */
348 for (req=transport->pending_recv; req; req=req->next) {
349 if (req->mid == mid) break;
353 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n",
354 mid, CVAL(hdr, HDR_COM)));
358 /* fill in the 'in' portion of the matching request */
359 req->in.buffer = buffer;
360 talloc_steal(req, buffer);
362 req->in.allocated = req->in.size;
364 /* handle NBT session replies */
365 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
366 req->status = NT_STATUS_OK;
370 /* handle non-SMB replies */
371 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
372 req->state = SMBCLI_REQUEST_ERROR;
376 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
377 DEBUG(2,("bad reply size for mid %d\n", mid));
378 req->status = NT_STATUS_UNSUCCESSFUL;
379 req->state = SMBCLI_REQUEST_ERROR;
386 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
387 req->in.data = req->in.vwv + VWV(wct) + 2;
388 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
389 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
390 DEBUG(3,("bad data size for mid %d\n", mid));
391 /* blergh - w2k3 gives a bogus data size values in some
393 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
396 req->in.ptr = req->in.data;
397 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
399 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
400 transport->error.etype = ETYPE_DOS;
401 transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
402 transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
403 req->status = dos_to_ntstatus(transport->error.e.dos.eclass,
404 transport->error.e.dos.ecode);
406 transport->error.etype = ETYPE_NT;
407 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
408 req->status = transport->error.e.nt_status;
411 if (!smbcli_request_check_sign_mac(req)) {
412 transport->error.etype = ETYPE_SOCKET;
413 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
414 req->state = SMBCLI_REQUEST_ERROR;
419 /* if this request has an async handler then call that to
420 notify that the reply has been received. This might destroy
421 the request so it must happen last */
422 DLIST_REMOVE(transport->pending_recv, req);
423 req->state = SMBCLI_REQUEST_DONE;
431 DLIST_REMOVE(transport->pending_recv, req);
432 req->state = SMBCLI_REQUEST_ERROR;
437 process some pending receives
439 static void smbcli_transport_process_recv(struct smbcli_transport *transport)
441 /* a incoming packet goes through 2 stages - first we read the
442 4 byte header, which tells us how much more is coming. Then
444 if (transport->recv_buffer.received < NBT_HDR_SIZE) {
446 ret = smbcli_sock_read(transport->socket,
447 transport->recv_buffer.header +
448 transport->recv_buffer.received,
449 NBT_HDR_SIZE - transport->recv_buffer.received);
451 smbcli_transport_dead(transport);
455 transport->recv_buffer.received += ret;
457 if (transport->recv_buffer.received == NBT_HDR_SIZE) {
458 /* we've got a full header */
459 transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
460 transport->recv_buffer.buffer = talloc(transport,
461 NBT_HDR_SIZE+transport->recv_buffer.req_size);
462 if (transport->recv_buffer.buffer == NULL) {
463 smbcli_transport_dead(transport);
466 memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
470 if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
472 ret = smbcli_sock_read(transport->socket,
473 transport->recv_buffer.buffer +
474 transport->recv_buffer.received,
475 transport->recv_buffer.req_size -
476 transport->recv_buffer.received);
478 smbcli_transport_dead(transport);
481 transport->recv_buffer.received += ret;
484 if (transport->recv_buffer.received != 0 &&
485 transport->recv_buffer.received == transport->recv_buffer.req_size) {
486 smbcli_transport_finish_recv(transport);
491 process some read/write requests that are pending
492 return False if the socket is dead
494 BOOL smbcli_transport_process(struct smbcli_transport *transport)
496 smbcli_transport_process_send(transport);
497 smbcli_transport_process_recv(transport);
498 if (transport->socket->sock == NULL) {
507 put a request into the send queue
509 void smbcli_transport_send(struct smbcli_request *req)
511 /* check if the transport is dead */
512 if (req->transport->socket->sock == NULL) {
513 req->state = SMBCLI_REQUEST_ERROR;
514 req->status = NT_STATUS_NET_WRITE_FAULT;
518 /* put it on the outgoing socket queue */
519 req->state = SMBCLI_REQUEST_SEND;
520 DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
522 /* make sure we look for write events */
523 smbcli_transport_write_enable(req->transport);