/* 
   Unix SMB/CIFS implementation.

   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2003
   Copyright (C) James Myers 2003 <myersjj@samba.org>
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
25 an event has happened on the socket
27 static void smbcli_transport_event_handler(struct event_context *ev, struct fd_event *fde,
28 time_t t, uint16_t flags)
30 struct smbcli_transport *transport = fde->private;
32 smbcli_transport_process(transport);
36 create a transport structure based on an established socket
38 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock)
41 struct smbcli_transport *transport;
44 mem_ctx = talloc_init("smbcli_transport");
45 if (!mem_ctx) return NULL;
47 transport = talloc_zero(mem_ctx, sizeof(*transport));
48 if (!transport) return NULL;
50 transport->event.ctx = event_context_init();
51 if (transport->event.ctx == NULL) {
52 talloc_destroy(mem_ctx);
56 transport->mem_ctx = mem_ctx;
57 transport->socket = sock;
58 transport->negotiate.protocol = PROTOCOL_NT1;
59 transport->options.use_spnego = lp_use_spnego();
60 transport->negotiate.max_xmit = ~0;
62 smbcli_init_signing(transport);
64 transport->socket->reference_count++;
66 ZERO_STRUCT(transport->called);
69 fde.flags = EVENT_FD_READ;
70 fde.handler = smbcli_transport_event_handler;
71 fde.private = transport;
74 transport->event.fde = event_add_fd(transport->event.ctx, &fde);
80 decrease reference count on a transport, and destroy if it becomes
83 void smbcli_transport_close(struct smbcli_transport *transport)
85 transport->reference_count--;
86 if (transport->reference_count <= 0) {
87 smbcli_sock_close(transport->socket);
88 event_remove_fd(transport->event.ctx, transport->event.fde);
89 event_remove_timed(transport->event.ctx, transport->event.te);
90 event_context_destroy(transport->event.ctx);
91 talloc_destroy(transport->mem_ctx);
96 mark the transport as dead
98 void smbcli_transport_dead(struct smbcli_transport *transport)
100 smbcli_sock_dead(transport->socket);
102 /* all pending sends become errors */
103 while (transport->pending_send) {
104 struct smbcli_request *req = transport->pending_send;
105 req->state = SMBCLI_REQUEST_ERROR;
106 req->status = NT_STATUS_NET_WRITE_FAULT;
107 DLIST_REMOVE(transport->pending_send, req);
113 /* as do all pending receives */
114 while (transport->pending_recv) {
115 struct smbcli_request *req = transport->pending_recv;
116 req->state = SMBCLI_REQUEST_ERROR;
117 req->status = NT_STATUS_NET_WRITE_FAULT;
118 DLIST_REMOVE(transport->pending_recv, req);
127 enable select for write on a transport
129 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
131 transport->event.fde->flags |= EVENT_FD_WRITE;
135 disable select for write on a transport
137 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
139 transport->event.fde->flags &= ~EVENT_FD_WRITE;
142 /****************************************************************************
143 send a session request (if appropriate)
144 ****************************************************************************/
145 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
146 struct nmb_name *calling,
147 struct nmb_name *called)
150 int len = NBT_HDR_SIZE;
151 struct smbcli_request *req;
154 transport->called = *called;
157 /* 445 doesn't have session request */
158 if (transport->socket->port == 445) {
162 /* allocate output buffer */
163 req = smbcli_request_setup_nonsmb(transport, NBT_HDR_SIZE + 2*nbt_mangled_name_len());
165 /* put in the destination name */
166 p = req->out.buffer + NBT_HDR_SIZE;
167 name_mangle(called->name, p, called->name_type);
171 p = req->out.buffer+len;
172 name_mangle(calling->name, p, calling->name_type);
175 _smb_setlen(req->out.buffer,len-4);
176 SCVAL(req->out.buffer,0,0x81);
178 if (!smbcli_request_send(req) ||
179 !smbcli_request_receive(req)) {
180 smbcli_request_destroy(req);
184 if (CVAL(req->in.buffer,0) != 0x82) {
185 transport->error.etype = ETYPE_NBT;
186 transport->error.e.nbt_error = CVAL(req->in.buffer,4);
187 smbcli_request_destroy(req);
191 smbcli_request_destroy(req);
196 /****************************************************************************
197 get next mid in sequence
198 ****************************************************************************/
199 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
202 struct smbcli_request *req;
204 mid = transport->next_mid;
207 /* now check to see if this mid is being used by one of the
208 pending requests. This is quite efficient because the list is
209 usually very short */
211 /* the zero mid is reserved for requests that don't have a mid */
212 if (mid == 0) mid = 1;
214 for (req=transport->pending_recv; req; req=req->next) {
215 if (req->mid == mid) {
221 transport->next_mid = mid+1;
225 static void idle_handler(struct event_context *ev,
226 struct timed_event *te, time_t t)
228 struct smbcli_transport *transport = te->private;
229 te->next_event = t + transport->idle.period;
230 transport->idle.func(transport, transport->idle.private);
234 setup the idle handler for a transport
235 the period is in seconds
237 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
238 void (*idle_func)(struct smbcli_transport *, void *),
242 struct timed_event te;
243 transport->idle.func = idle_func;
244 transport->idle.private = private;
245 transport->idle.period = period;
247 if (transport->event.te != NULL) {
248 event_remove_timed(transport->event.ctx, transport->event.te);
251 te.next_event = time(NULL) + period;
252 te.handler = idle_handler;
253 te.private = transport;
254 transport->event.te = event_add_timed(transport->event.ctx, &te);
258 process some pending sends
260 static void smbcli_transport_process_send(struct smbcli_transport *transport)
262 while (transport->pending_send) {
263 struct smbcli_request *req = transport->pending_send;
265 ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
267 if (errno == EAGAIN || errno == EINTR) {
270 smbcli_transport_dead(transport);
272 req->out.buffer += ret;
273 req->out.size -= ret;
274 if (req->out.size == 0) {
275 DLIST_REMOVE(transport->pending_send, req);
276 if (req->one_way_request) {
277 req->state = SMBCLI_REQUEST_DONE;
278 smbcli_request_destroy(req);
280 req->state = SMBCLI_REQUEST_RECV;
281 DLIST_ADD(transport->pending_recv, req);
286 /* we're out of requests to send, so don't wait for write
288 smbcli_transport_write_disable(transport);
292 we have a full request in our receive buffer - match it to a pending request
295 static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
297 uint8_t *buffer, *hdr, *vwv;
299 uint16_t wct, mid = 0;
300 struct smbcli_request *req;
302 buffer = transport->recv_buffer.buffer;
303 len = transport->recv_buffer.req_size;
305 ZERO_STRUCT(transport->recv_buffer);
307 hdr = buffer+NBT_HDR_SIZE;
310 /* see if it could be an oplock break request */
311 if (handle_oplock_break(transport, len, hdr, vwv)) {
312 talloc_free(transport->mem_ctx, buffer);
316 /* at this point we need to check for a readbraw reply, as
317 these can be any length */
318 if (transport->readbraw_pending) {
319 transport->readbraw_pending = 0;
321 /* it must match the first entry in the pending queue
322 as the client is not allowed to have outstanding
324 req = transport->pending_recv;
325 if (!req) goto error;
327 req->in.buffer = buffer;
328 talloc_steal(transport->mem_ctx, req->mem_ctx, buffer);
330 req->in.allocated = req->in.size;
334 if (len >= MIN_SMB_SIZE) {
335 /* extract the mid for matching to pending requests */
336 mid = SVAL(hdr, HDR_MID);
337 wct = CVAL(hdr, HDR_WCT);
340 /* match the incoming request against the list of pending requests */
341 for (req=transport->pending_recv; req; req=req->next) {
342 if (req->mid == mid) break;
346 DEBUG(1,("Discarding unmatched reply with mid %d\n", mid));
350 /* fill in the 'in' portion of the matching request */
351 req->in.buffer = buffer;
352 talloc_steal(transport->mem_ctx, req->mem_ctx, buffer);
354 req->in.allocated = req->in.size;
356 /* handle NBT session replies */
357 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
358 req->status = NT_STATUS_OK;
362 /* handle non-SMB replies */
363 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
364 req->state = SMBCLI_REQUEST_ERROR;
368 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
369 DEBUG(2,("bad reply size for mid %d\n", mid));
370 req->status = NT_STATUS_UNSUCCESSFUL;
371 req->state = SMBCLI_REQUEST_ERROR;
378 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
379 req->in.data = req->in.vwv + VWV(wct) + 2;
380 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
381 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
382 DEBUG(3,("bad data size for mid %d\n", mid));
383 /* blergh - w2k3 gives a bogus data size values in some
385 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
388 req->in.ptr = req->in.data;
389 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
391 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
392 transport->error.etype = ETYPE_DOS;
393 transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
394 transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
395 req->status = dos_to_ntstatus(transport->error.e.dos.eclass,
396 transport->error.e.dos.ecode);
398 transport->error.etype = ETYPE_NT;
399 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
400 req->status = transport->error.e.nt_status;
403 if (!smbcli_request_check_sign_mac(req)) {
404 transport->error.etype = ETYPE_SOCKET;
405 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
406 req->state = SMBCLI_REQUEST_ERROR;
411 /* if this request has an async handler then call that to
412 notify that the reply has been received. This might destroy
413 the request so it must happen last */
414 DLIST_REMOVE(transport->pending_recv, req);
415 req->state = SMBCLI_REQUEST_DONE;
423 DLIST_REMOVE(transport->pending_recv, req);
424 req->state = SMBCLI_REQUEST_ERROR;
429 process some pending receives
431 static void smbcli_transport_process_recv(struct smbcli_transport *transport)
433 /* a incoming packet goes through 2 stages - first we read the
434 4 byte header, which tells us how much more is coming. Then
436 if (transport->recv_buffer.received < NBT_HDR_SIZE) {
438 ret = smbcli_sock_read(transport->socket,
439 transport->recv_buffer.header +
440 transport->recv_buffer.received,
441 NBT_HDR_SIZE - transport->recv_buffer.received);
443 smbcli_transport_dead(transport);
447 if (errno == EINTR || errno == EAGAIN) {
450 smbcli_transport_dead(transport);
454 transport->recv_buffer.received += ret;
456 if (transport->recv_buffer.received == NBT_HDR_SIZE) {
457 /* we've got a full header */
458 transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
459 transport->recv_buffer.buffer = talloc(transport->mem_ctx,
460 NBT_HDR_SIZE+transport->recv_buffer.req_size);
461 if (transport->recv_buffer.buffer == NULL) {
462 smbcli_transport_dead(transport);
465 memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
469 if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
471 ret = smbcli_sock_read(transport->socket,
472 transport->recv_buffer.buffer +
473 transport->recv_buffer.received,
474 transport->recv_buffer.req_size -
475 transport->recv_buffer.received);
477 if (errno == EINTR || errno == EAGAIN) {
480 smbcli_transport_dead(transport);
483 transport->recv_buffer.received += ret;
486 if (transport->recv_buffer.received != 0 &&
487 transport->recv_buffer.received == transport->recv_buffer.req_size) {
488 smbcli_transport_finish_recv(transport);
493 process some read/write requests that are pending
494 return False if the socket is dead
496 BOOL smbcli_transport_process(struct smbcli_transport *transport)
498 smbcli_transport_process_send(transport);
499 smbcli_transport_process_recv(transport);
500 if (transport->socket->fd == -1) {
509 put a request into the send queue
511 void smbcli_transport_send(struct smbcli_request *req)
513 /* check if the transport is dead */
514 if (req->transport->socket->fd == -1) {
515 req->state = SMBCLI_REQUEST_ERROR;
516 req->status = NT_STATUS_NET_WRITE_FAULT;
520 /* put it on the outgoing socket queue */
521 req->state = SMBCLI_REQUEST_SEND;
522 DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
524 /* make sure we look for write events */
525 smbcli_transport_write_enable(req->transport);