2 Unix SMB/CIFS implementation.
3 SMB client transport context management functions
5 Copyright (C) Andrew Tridgell 1994-2005
6 Copyright (C) James Myers 2003 <myersjj@samba.org>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #include "libcli/raw/libcliraw.h"
25 #include "system/time.h"
26 #include "dlinklist.h"
30 static void smbcli_transport_process_recv(struct smbcli_transport *transport);
31 static void smbcli_transport_process_send(struct smbcli_transport *transport);
/*
  fd-event callback for the transport's socket: dispatches READ
  readiness to smbcli_transport_process_recv() and WRITE readiness to
  smbcli_transport_process_send().
  NOTE(review): the extraction dropped lines here — the struct fd_event
  parameter and the closing braces are not visible. Comments only; do
  not treat this block as complete.
*/
34 an event has happened on the socket
36 static void smbcli_transport_event_handler(struct event_context *ev,
38 uint16_t flags, void *private)
40 struct smbcli_transport *transport = talloc_get_type(private,
41 struct smbcli_transport);
42 if (flags & EVENT_FD_READ) {
43 smbcli_transport_process_recv(transport);
46 if (flags & EVENT_FD_WRITE) {
47 smbcli_transport_process_send(transport);
/*
  talloc destructor for a smbcli_transport: marks the transport dead
  (failing all pending requests) when the context is freed.
  NOTE(review): the trailing "return 0;" and closing brace are not
  visible in this extraction.
*/
54 static int transport_destructor(void *ptr)
56 	struct smbcli_transport *transport = ptr;
58 	smbcli_transport_dead(transport);
/*
  Create a smbcli_transport on top of an already-established socket.

  parent_ctx: talloc parent for the new transport.
  primary:    presumably selects talloc_steal vs talloc_reference of
              the socket (both assignments are visible below but the
              if/else lines are missing) — TODO confirm against the
              original source.

  Defaults are loaded from lp_*() config calls, signing state is
  initialised, and fd-event handling is taken over from the socket
  layer (the socket's fde is freed and replaced with one that calls
  smbcli_transport_event_handler).

  NOTE(review): the extraction dropped several lines here (the
  primary-branch conditionals, event_add_fd arguments, and the final
  "return transport;" / closing brace are not visible).
*/
63 create a transport structure based on an established socket
65 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
66 TALLOC_CTX *parent_ctx, BOOL primary)
68 	struct smbcli_transport *transport;
70 	transport = talloc_zero(parent_ctx, struct smbcli_transport);
71 	if (!transport) return NULL;
/* ownership of the socket: steal when primary, reference otherwise —
   the guarding condition is missing from this view */
74 	transport->socket = talloc_steal(transport, sock);
76 	transport->socket = talloc_reference(transport, sock);
/* negotiate defaults before any protocol exchange */
78 	transport->negotiate.protocol = PROTOCOL_NT1;
79 	transport->options.use_spnego = lp_use_spnego();
80 	transport->options.max_xmit = lp_max_xmit();
81 	transport->options.max_mux = lp_maxmux();
83 	transport->negotiate.max_xmit = transport->options.max_xmit;
85 	smbcli_init_signing(transport);
87 	ZERO_STRUCT(transport->called);
89 	/* take over event handling from the socket layer - it only
90 	   handles events up until we are connected */
91 	talloc_free(transport->socket->event.fde);
92 	transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
94 socket_get_fd(transport->socket->sock),
96 smbcli_transport_event_handler,
99 	talloc_set_destructor(transport, transport_destructor);
/*
  Mark the transport as dead: kill the underlying socket, then fail
  every request still on the pending_send and pending_recv queues with
  NT_STATUS_NET_WRITE_FAULT.
  NOTE(review): lines completing each loop body (likely the async
  callback invocation) and the closing braces are not visible in this
  extraction.
*/
105 mark the transport as dead
107 void smbcli_transport_dead(struct smbcli_transport *transport)
109 	smbcli_sock_dead(transport->socket);
111 	/* all pending sends become errors */
112 	while (transport->pending_send) {
113 		struct smbcli_request *req = transport->pending_send;
114 		req->state = SMBCLI_REQUEST_ERROR;
115 		req->status = NT_STATUS_NET_WRITE_FAULT;
116 		DLIST_REMOVE(transport->pending_send, req);
122 	/* as do all pending receives */
123 	while (transport->pending_recv) {
124 		struct smbcli_request *req = transport->pending_recv;
125 		req->state = SMBCLI_REQUEST_ERROR;
126 		req->status = NT_STATUS_NET_WRITE_FAULT;
127 		DLIST_REMOVE(transport->pending_recv, req);
/*
  Ask the event loop to report write readiness on the transport's fd
  (used when requests are queued for sending).
  NOTE(review): closing brace not visible in this extraction.
*/
136 enable select for write on a transport
138 static void smbcli_transport_write_enable(struct smbcli_transport *transport)
140 	struct fd_event *fde = transport->socket->event.fde;
141 	EVENT_FD_WRITEABLE(fde);
/*
  Stop watching the transport's fd for write readiness (used when the
  send queue drains).
  NOTE(review): closing brace not visible in this extraction.
*/
145 disable select for write on a transport
147 static void smbcli_transport_write_disable(struct smbcli_transport *transport)
149 	struct fd_event *fde = transport->socket->event.fde;
150 	EVENT_FD_NOT_WRITEABLE(fde);
/*
  Async send of a NetBIOS session request (packet type 0x81) carrying
  the encoded called and calling names.

  The called name is duplicated onto the transport for later use; both
  names are serialised to blobs, copied into a non-SMB request buffer
  after the 4-byte NBT header (called name first, then calling name),
  the NBT length field is set, and the request is queued for sending.

  Returns the request on success; the failure path frees tmp_ctx
  (NOTE(review): the "failed:" label, intermediate returns, and closing
  brace are not visible in this extraction).
*/
154 send a session request
156 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
157 struct nbt_name *calling,
158 struct nbt_name *called)
161 	struct smbcli_request *req;
162 	DATA_BLOB calling_blob, called_blob;
163 	TALLOC_CTX *tmp_ctx = talloc_new(transport);
/* remember the called name on the transport itself */
166 	status = nbt_name_dup(transport, called, &transport->called);
167 	if (!NT_STATUS_IS_OK(status)) goto failed;
169 	status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
170 	if (!NT_STATUS_IS_OK(status)) goto failed;
172 	status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
173 	if (!NT_STATUS_IS_OK(status)) goto failed;
175 	/* allocate output buffer */
176 	req = smbcli_request_setup_nonsmb(transport,
178 calling_blob.length + called_blob.length);
179 	if (req == NULL) goto failed;
181 	/* put in the destination name */
182 	p = req->out.buffer + NBT_HDR_SIZE;
183 	memcpy(p, called_blob.data, called_blob.length);
184 	p += called_blob.length;
/* then the source (calling) name */
186 	memcpy(p, calling_blob.data, calling_blob.length);
187 	p += calling_blob.length;
/* fill in the NBT length field and the session-request type byte */
189 	_smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer)-4);
190 	SCVAL(req->out.buffer,0,0x81);
192 	if (!smbcli_request_send(req)) {
193 		smbcli_request_destroy(req);
197 	talloc_free(tmp_ctx);
201 	talloc_free(tmp_ctx);
/*
  Map a NetBIOS session-refused error byte to an NTSTATUS.
  NOTE(review): the switch statement and its case labels were dropped
  by the extraction — only the return statements survive, so the
  error-code-to-status pairing cannot be confirmed from this view.
*/
206 map a session request error to a NTSTATUS
208 static NTSTATUS map_session_refused_error(uint8_t error)
213 		return NT_STATUS_REMOTE_NOT_LISTENING;
215 		return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
217 		return NT_STATUS_REMOTE_RESOURCES;
219 	return NT_STATUS_UNEXPECTED_IO_ERROR;
/*
  Receive half of smbcli_transport_connect(): waits for the session
  request reply and maps the NBT packet type byte to an NTSTATUS
  (positive response -> OK, refusal -> mapped error byte, retarget ->
  NOT_SUPPORTED, anything else -> UNEXPECTED_IO_ERROR).  Destroys the
  request before returning.
  NOTE(review): the switch's case labels and the final return are not
  visible in this extraction.
*/
224 finish a smbcli_transport_connect()
226 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
230 	if (!smbcli_request_receive(req)) {
231 		smbcli_request_destroy(req);
232 		return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* dispatch on the NBT packet type byte of the reply */
235 	switch (CVAL(req->in.buffer,0)) {
237 		status = NT_STATUS_OK;
/* session refused: error code is in byte 4 of the reply */
240 		status = map_session_refused_error(CVAL(req->in.buffer,4));
243 		DEBUG(1,("Warning: session retarget not supported\n"));
244 		status = NT_STATUS_NOT_SUPPORTED;
247 		status = NT_STATUS_UNEXPECTED_IO_ERROR;
251 	smbcli_request_destroy(req);
/*
  Synchronous session request: skipped entirely on port 445 (raw SMB
  over TCP needs no NetBIOS session setup — presumably returning True
  there; the early-return line is not visible), otherwise sends the
  request and waits for the reply, returning True on NT_STATUS_OK.
  NOTE(review): the extraction dropped lines here, including the
  port-445 return and the trailing arguments to
  smbcli_transport_connect_send.
*/
257 send a session request (if needed)
259 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
260 struct nbt_name *calling,
261 struct nbt_name *called)
263 	struct smbcli_request *req;
266 	if (transport->socket->port == 445) {
270 	req = smbcli_transport_connect_send(transport,
272 	status = smbcli_transport_connect_recv(req);
273 	return NT_STATUS_IS_OK(status);
/*
  Allocate the next multiplex id (mid) for a request, skipping mid 0
  (reserved for requests without a mid) and any mid already in use by
  a request on the pending_recv queue.  The linear scan is acceptable
  because the pending list is normally very short.
  NOTE(review): the retry/loop structure around the scan and the final
  "return mid;" are not visible in this extraction.
*/
276 /****************************************************************************
277 get next mid in sequence
278 ****************************************************************************/
279 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
282 	struct smbcli_request *req;
284 	mid = transport->next_mid;
287 	/* now check to see if this mid is being used by one of the
288 	   pending requests. This is quite efficient because the list is
289 	   usually very short */
291 	/* the zero mid is reserved for requests that don't have a mid */
292 	if (mid == 0) mid = 1;
294 	for (req=transport->pending_recv; req; req=req->next) {
295 		if (req->mid == mid) {
/* bump the counter past the mid we just handed out */
301 	transport->next_mid = mid+1;
/*
  Timed-event callback for the transport idle function: re-arms the
  timer idle.period microseconds in the future, then invokes the
  user-registered idle.func with its private pointer.
  NOTE(review): some event_add_timed arguments and the closing brace
  are not visible in this extraction.
*/
305 static void idle_handler(struct event_context *ev,
306 struct timed_event *te, struct timeval t, void *private)
308 	struct smbcli_transport *transport = talloc_get_type(private,
309 struct smbcli_transport);
310 	struct timeval next = timeval_add(&t, 0, transport->idle.period);
311 	transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
314 idle_handler, transport);
/* call the user's idle function last - it may destroy the transport */
315 	transport->idle.func(transport, transport->idle.private);
/*
  Register (or replace) the idle handler for a transport.  Any existing
  timed event is freed first, then a new one is scheduled 'period'
  microseconds from now; idle_handler() keeps it re-armed thereafter.
  NOTE(review): the parameter list is truncated in this extraction
  (the 'period' and 'private' parameters are not visible) and some
  event_add_timed arguments are missing.
*/
319 setup the idle handler for a transport
320 the period is in microseconds
322 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
323 void (*idle_func)(struct smbcli_transport *, void *),
327 	transport->idle.func = idle_func;
328 	transport->idle.private = private;
329 	transport->idle.period = period;
/* drop any previously-registered idle timer before arming a new one */
331 	if (transport->socket->event.te != NULL) {
332 		talloc_free(transport->socket->event.te);
335 	transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
337 timeval_current_ofs(0, period),
338 idle_handler, transport);
/*
  Drain the pending_send queue: write each request's remaining bytes
  to the socket, tolerating partial writes.  EAGAIN/EINTR pauses the
  loop (presumably returning to wait for the next write event — the
  return line is not visible); any other write failure kills the
  transport.  A fully-sent request either completes immediately (one
  way requests) or moves to the pending_recv queue to await its reply.
  When the queue empties, write events are disabled.
  NOTE(review): several lines (return statements, closing braces, the
  smbcli_sock_write error check) were dropped by the extraction.
*/
342 process some pending sends
344 static void smbcli_transport_process_send(struct smbcli_transport *transport)
346 	while (transport->pending_send) {
347 		struct smbcli_request *req = transport->pending_send;
349 		ret = smbcli_sock_write(transport->socket, req->out.buffer, req->out.size);
/* transient errors: stop for now, the write event will re-trigger */
351 			if (errno == EAGAIN || errno == EINTR) {
354 			smbcli_transport_dead(transport);
/* account for a (possibly partial) write */
357 		req->out.buffer += ret;
358 		req->out.size -= ret;
359 		if (req->out.size == 0) {
360 			DLIST_REMOVE(transport->pending_send, req);
361 			if (req->one_way_request) {
362 				req->state = SMBCLI_REQUEST_DONE;
363 				smbcli_request_destroy(req);
365 				req->state = SMBCLI_REQUEST_RECV;
366 				DLIST_ADD(transport->pending_recv, req);
371 	/* we're out of requests to send, so don't wait for write
373 	smbcli_transport_write_disable(transport);
/*
  Called when a complete packet sits in transport->recv_buffer: match
  it to a pending request and fill in that request's 'in' state.

  Processing order: take ownership of the buffer and zero the recv
  state; give oplock-break handling first refusal; handle a pending
  readbraw reply (which matches the head of pending_recv and can be
  any length); otherwise extract mid/wct from the SMB header, find the
  matching pending request (discarding unmatched replies), validate
  sizes, decode DOS- or NT-style error codes into req->status, verify
  the signing MAC, and finally remove the request from pending_recv
  and mark it DONE (the async callback, if any, fires last because it
  may destroy the request).  The error path marks the head request as
  SMBCLI_REQUEST_ERROR instead.

  NOTE(review): the extraction dropped many lines throughout this
  function (vwv assignment, several braces, returns, and parts of the
  size-validation and async-callback code); the comments describe only
  what the surviving lines show.
*/
377 we have a full request in our receive buffer - match it to a pending request
380 static void smbcli_transport_finish_recv(struct smbcli_transport *transport)
382 	uint8_t *buffer, *hdr, *vwv;
384 	uint16_t wct=0, mid = 0;
385 	struct smbcli_request *req;
/* detach the completed packet from the transport's recv state */
387 	buffer = transport->recv_buffer.buffer;
388 	len = transport->recv_buffer.req_size;
390 	ZERO_STRUCT(transport->recv_buffer);
392 	hdr = buffer+NBT_HDR_SIZE;
395 	/* see if it could be an oplock break request */
396 	if (handle_oplock_break(transport, len, hdr, vwv)) {
401 	/* at this point we need to check for a readbraw reply, as
402 	   these can be any length */
403 	if (transport->readbraw_pending) {
404 		transport->readbraw_pending = 0;
406 		/* it must match the first entry in the pending queue
407 		   as the client is not allowed to have outstanding
409 		req = transport->pending_recv;
410 		if (!req) goto error;
412 		req->in.buffer = buffer;
413 		talloc_steal(req, buffer);
415 		req->in.allocated = req->in.size;
419 	if (len >= MIN_SMB_SIZE) {
420 		/* extract the mid for matching to pending requests */
421 		mid = SVAL(hdr, HDR_MID);
422 		wct = CVAL(hdr, HDR_WCT);
425 	/* match the incoming request against the list of pending requests */
426 	for (req=transport->pending_recv; req; req=req->next) {
427 		if (req->mid == mid) break;
/* no pending request claims this mid: drop the reply */
431 		DEBUG(1,("Discarding unmatched reply with mid %d op %d\n",
432 			 mid, CVAL(hdr, HDR_COM)));
436 	/* fill in the 'in' portion of the matching request */
437 	req->in.buffer = buffer;
438 	talloc_steal(req, buffer);
440 	req->in.allocated = req->in.size;
442 	/* handle NBT session replies */
443 	if (req->in.size >= 4 && req->in.buffer[0] != 0) {
444 		req->status = NT_STATUS_OK;
448 	/* handle non-SMB replies */
449 	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
450 		req->state = SMBCLI_REQUEST_ERROR;
/* the stated word count must fit inside the packet */
454 	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
455 		DEBUG(2,("bad reply size for mid %d\n", mid));
456 		req->status = NT_STATUS_UNSUCCESSFUL;
457 		req->state = SMBCLI_REQUEST_ERROR;
/* locate the data section following the vwv words */
464 	if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
465 		req->in.data = req->in.vwv + VWV(wct) + 2;
466 		req->in.data_size = SVAL(req->in.vwv, VWV(wct));
467 		if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
468 			DEBUG(3,("bad data size for mid %d\n", mid));
469 			/* blergh - w2k3 gives a bogus data size values in some
/* clamp the claimed data size to what actually arrived */
471 			req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
474 	req->in.ptr = req->in.data;
475 	req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
/* decode the error field as DOS or NT style depending on flags2 */
477 	if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
478 		transport->error.etype = ETYPE_DOS;
479 		transport->error.e.dos.eclass = CVAL(req->in.hdr,HDR_RCLS);
480 		transport->error.e.dos.ecode = SVAL(req->in.hdr,HDR_ERR);
481 		req->status = dos_to_ntstatus(transport->error.e.dos.eclass,
482 					      transport->error.e.dos.ecode);
484 		transport->error.etype = ETYPE_NT;
485 		transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
486 		req->status = transport->error.e.nt_status;
/* reject replies with a bad signing MAC */
489 	if (!smbcli_request_check_sign_mac(req)) {
490 		transport->error.etype = ETYPE_SOCKET;
491 		transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
492 		req->state = SMBCLI_REQUEST_ERROR;
493 		req->status = NT_STATUS_ACCESS_DENIED;
498 	/* if this request has an async handler then call that to
499 	   notify that the reply has been received. This might destroy
500 	   the request so it must happen last */
501 	DLIST_REMOVE(transport->pending_recv, req);
502 	req->state = SMBCLI_REQUEST_DONE;
/* error path: fail the request we were trying to fill in */
510 	DLIST_REMOVE(transport->pending_recv, req);
511 	req->state = SMBCLI_REQUEST_ERROR;
/*
  Incremental, non-blocking packet receive.  Stage 1 reads the 4-byte
  NBT header into recv_buffer.header; once complete, the full packet
  size is computed with smb_len() and a buffer is allocated (the
  header bytes are copied into its front).  Stage 2 reads the rest of
  the packet into that buffer.  When received == req_size the packet
  is handed to smbcli_transport_finish_recv().  A read failure at
  either stage kills the transport.
  NOTE(review): the extraction dropped lines here (the declarations of
  'ret', the error checks after smbcli_sock_read, several returns and
  closing braces).
*/
516 process some pending receives
518 static void smbcli_transport_process_recv(struct smbcli_transport *transport)
520 	/* a incoming packet goes through 2 stages - first we read the
521 	   4 byte header, which tells us how much more is coming. Then
523 	if (transport->recv_buffer.received < NBT_HDR_SIZE) {
525 		ret = smbcli_sock_read(transport->socket,
526 				       transport->recv_buffer.header +
527 				       transport->recv_buffer.received,
528 				       NBT_HDR_SIZE - transport->recv_buffer.received);
/* read failure: transport is unusable */
530 			smbcli_transport_dead(transport);
534 		transport->recv_buffer.received += ret;
536 		if (transport->recv_buffer.received == NBT_HDR_SIZE) {
537 			/* we've got a full header */
538 			transport->recv_buffer.req_size = smb_len(transport->recv_buffer.header) + NBT_HDR_SIZE;
539 			transport->recv_buffer.buffer = talloc_size(transport,
540 								    NBT_HDR_SIZE+transport->recv_buffer.req_size);
541 			if (transport->recv_buffer.buffer == NULL) {
542 				smbcli_transport_dead(transport);
/* seed the packet buffer with the header we already read */
545 			memcpy(transport->recv_buffer.buffer, transport->recv_buffer.header, NBT_HDR_SIZE);
/* stage 2: read the remainder of the packet body */
549 	if (transport->recv_buffer.received < transport->recv_buffer.req_size) {
551 		ret = smbcli_sock_read(transport->socket,
552 				       transport->recv_buffer.buffer +
553 				       transport->recv_buffer.received,
554 				       transport->recv_buffer.req_size -
555 				       transport->recv_buffer.received);
557 			smbcli_transport_dead(transport);
560 		transport->recv_buffer.received += ret;
/* a complete packet: match it against pending requests */
563 	if (transport->recv_buffer.received != 0 &&
564 	    transport->recv_buffer.received == transport->recv_buffer.req_size) {
565 		smbcli_transport_finish_recv(transport);
/*
  One synchronous pump of the transport: flush pending sends, then
  pull in any available receives.  Returns False when the socket has
  died (sock == NULL) — presumably True otherwise; the return lines
  and closing brace are not visible in this extraction.
*/
570 process some read/write requests that are pending
571 return False if the socket is dead
573 BOOL smbcli_transport_process(struct smbcli_transport *transport)
575 	smbcli_transport_process_send(transport);
576 	smbcli_transport_process_recv(transport);
577 	if (transport->socket->sock == NULL) {
/*
  Queue a request for transmission.  If the transport is already dead
  the request is failed immediately with NT_STATUS_NET_WRITE_FAULT
  (presumably followed by a return — that line is not visible);
  otherwise it is appended to the pending_send queue and write events
  are enabled so smbcli_transport_process_send() will pick it up.
  NOTE(review): this function runs past the end of the visible
  extraction; its closing brace is not shown.
*/
586 put a request into the send queue
588 void smbcli_transport_send(struct smbcli_request *req)
590 	/* check if the transport is dead */
591 	if (req->transport->socket->sock == NULL) {
592 		req->state = SMBCLI_REQUEST_ERROR;
593 		req->status = NT_STATUS_NET_WRITE_FAULT;
597 	/* put it on the outgoing socket queue */
598 	req->state = SMBCLI_REQUEST_SEND;
599 	DLIST_ADD_END(req->transport->pending_send, req, struct smbcli_request *);
601 	/* make sure we look for write events */
602 	smbcli_transport_write_enable(req->transport);