/*
   Unix SMB/CIFS implementation.

   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
23 #include "libcli/raw/libcliraw.h"
24 #include "lib/socket/socket.h"
25 #include "lib/util/dlinklist.h"
26 #include "lib/events/events.h"
27 #include "lib/stream/packet.h"
28 #include "librpc/gen_ndr/ndr_nbt.h"
29 #include "param/param.h"
/*
  an event has happened on the socket: forward readable/writable
  notifications from the event loop to the packet layer
  (NOTE: extraction gaps — some original lines are not visible here)
*/
35 static void smbcli_transport_event_handler(struct event_context *ev,
37 uint16_t flags, void *private)
/* recover the transport from the opaque fd-event private pointer */
39 struct smbcli_transport *transport = talloc_get_type(private,
40 struct smbcli_transport);
/* readable: let the packet parser pull the next chunk off the socket */
41 if (flags & EVENT_FD_READ) {
42 packet_recv(transport->packet);
/* writable: flush any queued outgoing packets */
45 if (flags & EVENT_FD_WRITE) {
46 packet_queue_run(transport->packet);
/*
  talloc destructor for the transport: freeing the transport marks it
  dead with a local-disconnect status so pending requests get errored
*/
53 static int transport_destructor(struct smbcli_transport *transport)
55 smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
/*
  error callback registered with the packet layer: any socket-level
  error kills the whole transport with the given status
*/
63 static void smbcli_transport_error(void *private, NTSTATUS status)
65 struct smbcli_transport *transport = talloc_get_type(private, struct smbcli_transport);
66 smbcli_transport_dead(transport, status);
69 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob);
/*
  create a transport structure based on an established socket

  Allocates a zeroed smbcli_transport off parent_ctx, takes ownership of
  (or a reference to) the socket, seeds negotiate/option defaults, wires
  up the NBT stream->packet parser, and takes over fd event handling
  from the socket layer.  Returns NULL on allocation failure.
  (NOTE(review): parameters between parent_ctx and signing are not
  visible in this extraction — confirm the full signature upstream.)
*/
74 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
75 TALLOC_CTX *parent_ctx,
80 enum smb_signing_state signing)
82 struct smbcli_transport *transport;
84 transport = talloc_zero(parent_ctx, struct smbcli_transport);
85 if (!transport) return NULL;
/* either own the socket outright or just reference it — the selecting
   condition (presumably a "primary" flag) is in a dropped line */
88 transport->socket = talloc_steal(transport, sock);
90 transport->socket = talloc_reference(transport, sock);
/* default to NT1 until a real protocol is negotiated */
92 transport->negotiate.protocol = PROTOCOL_NT1;
93 transport->options.use_spnego = use_spnego;
94 transport->options.max_xmit = max_xmit;
95 transport->options.max_mux = max_mux;
96 transport->options.request_timeout = SMB_REQUEST_TIMEOUT;
97 transport->options.signing = signing;
/* start with our own max_xmit until the server tells us otherwise */
99 transport->negotiate.max_xmit = transport->options.max_xmit;
101 /* setup the stream -> packet parser */
102 transport->packet = packet_init(transport);
103 if (transport->packet == NULL) {
104 talloc_free(transport);
/* NBT framing: full_request_nbt splits the byte stream into messages,
   finish_recv matches each complete message to a pending request */
107 packet_set_private(transport->packet, transport);
108 packet_set_socket(transport->packet, transport->socket->sock);
109 packet_set_callback(transport->packet, smbcli_transport_finish_recv);
110 packet_set_full_request(transport->packet, packet_full_request_nbt);
111 packet_set_error_handler(transport->packet, smbcli_transport_error);
112 packet_set_event_context(transport->packet, transport->socket->event.ctx);
/* nofree: the packet layer must not free buffers we talloc_steal later */
113 packet_set_nofree(transport->packet);
115 smbcli_init_signing(transport);
117 ZERO_STRUCT(transport->called);
119 /* take over event handling from the socket layer - it only
120 handles events up until we are connected */
121 talloc_free(transport->socket->event.fde);
122 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
123 transport->socket->sock,
124 socket_get_fd(transport->socket->sock),
126 smbcli_transport_event_handler,
129 packet_set_fde(transport->packet, transport->socket->event.fde);
130 packet_set_serialise(transport->packet);
/* ensure pending requests are errored when the transport is freed */
131 talloc_set_destructor(transport, transport_destructor);
/*
  mark the transport as dead

  Kills the underlying socket, normalises a generic NT_STATUS_UNSUCCESSFUL
  into NT_STATUS_UNEXPECTED_NETWORK_ERROR, and fails the first pending
  receive with the given status.
*/
139 void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
141 smbcli_sock_dead(transport->socket);
/* give callers a more specific error than plain "unsuccessful" */
143 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
144 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
147 /* kill only the first pending receive - this is so that if
148 that async function frees the connection we don't die trying
149 to use old memory. The caller has to cope with only one
*/
151 if (transport->pending_recv) {
152 struct smbcli_request *req = transport->pending_recv;
153 req->state = SMBCLI_REQUEST_ERROR;
154 req->status = status;
155 DLIST_REMOVE(transport->pending_recv, req);
/*
  send a NBT session request (async send)

  Encodes the calling/called NetBIOS names, builds a session-request
  message (type 0x81) in a non-SMB request buffer and sends it.
  A copy of the called name is kept on the transport for later use.
  Returns the request on success; the failure path (after `failed:`)
  frees tmp_ctx and presumably returns NULL — confirm in full source.
*/
166 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
167 struct nbt_name *calling,
168 struct nbt_name *called)
171 struct smbcli_request *req;
172 DATA_BLOB calling_blob, called_blob;
173 TALLOC_CTX *tmp_ctx = talloc_new(transport);
/* remember the called name on the transport (transport lifetime) */
176 status = nbt_name_dup(transport, called, &transport->called);
177 if (!NT_STATUS_IS_OK(status)) goto failed;
/* wire-encode both names into temporary blobs */
179 status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
180 if (!NT_STATUS_IS_OK(status)) goto failed;
182 status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
183 if (!NT_STATUS_IS_OK(status)) goto failed;
185 /* allocate output buffer */
186 req = smbcli_request_setup_nonsmb(transport,
188 calling_blob.length + called_blob.length);
189 if (req == NULL) goto failed;
191 /* put in the destination name */
192 p = req->out.buffer + NBT_HDR_SIZE;
193 memcpy(p, called_blob.data, called_blob.length);
194 p += called_blob.length;
/* then the source (calling) name */
196 memcpy(p, calling_blob.data, calling_blob.length);
197 p += calling_blob.length;
/* fix up the NBT header: payload length and message type 0x81
   (session request) */
199 _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
200 SCVAL(req->out.buffer,0,0x81);
202 if (!smbcli_request_send(req)) {
203 smbcli_request_destroy(req);
207 talloc_free(tmp_ctx);
/* failed: cleanup path */
211 talloc_free(tmp_ctx);
/*
  map a NBT session-request refusal error code to a NTSTATUS
  (NOTE(review): the switch/case labels selecting between these return
  values were dropped by the extraction — per RFC 1002 the codes are
  0x80/0x81 not listening, 0x82 name not present, 0x83 insufficient
  resources; confirm against the full source)
*/
218 static NTSTATUS map_session_refused_error(uint8_t error)
223 return NT_STATUS_REMOTE_NOT_LISTENING;
225 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
227 return NT_STATUS_REMOTE_RESOURCES;
/* any unrecognised refusal code */
229 return NT_STATUS_UNEXPECTED_IO_ERROR;
/*
  finish a smbcli_transport_connect(): receive and interpret the
  NBT session reply.  The message type byte selects between positive
  response (OK), refusal (error byte at offset 4 mapped to NTSTATUS),
  retarget (not supported), or anything else (unexpected I/O error).
  The request is destroyed before returning.
*/
236 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
240 if (!smbcli_request_receive(req)) {
241 smbcli_request_destroy(req);
242 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* switch on the NBT message type (first byte of the reply);
   the case labels themselves are in dropped lines */
245 switch (CVAL(req->in.buffer,0)) {
247 status = NT_STATUS_OK;
/* refusal: map the reason byte to a NTSTATUS */
250 status = map_session_refused_error(CVAL(req->in.buffer,4));
253 DEBUG(1,("Warning: session retarget not supported\n"));
254 status = NT_STATUS_NOT_SUPPORTED;
257 status = NT_STATUS_UNEXPECTED_IO_ERROR;
261 smbcli_request_destroy(req);
/*
  send a NBT session request (if needed) — sync interface.
  Port 445 (raw SMB over TCP) needs no NetBIOS session setup, so it is
  skipped there; otherwise do the async send/recv pair and report
  success as a bool.
*/
269 bool smbcli_transport_connect(struct smbcli_transport *transport,
270 struct nbt_name *calling,
271 struct nbt_name *called)
273 struct smbcli_request *req;
/* raw TCP transport: no NBT session request required */
276 if (transport->socket->port == 445) {
280 req = smbcli_transport_connect_send(transport,
282 status = smbcli_transport_connect_recv(req);
283 return NT_STATUS_IS_OK(status);
/****************************************************************************
 get next mid in sequence

 Returns a multiplex id not currently used by any pending request.
 mid 0 is reserved, so the counter skips it.  The scan over pending_recv
 avoids handing out a mid that is still awaiting a reply.
****************************************************************************/
289 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
292 struct smbcli_request *req;
294 mid = transport->next_mid;
297 /* now check to see if this mid is being used by one of the
298 pending requests. This is quite efficient because the list is
299 usually very short */
301 /* the zero mid is reserved for requests that don't have a mid */
302 if (mid == 0) mid = 1;
/* collision with a pending request forces another iteration
   (the retry/loop structure spans dropped lines) */
304 for (req=transport->pending_recv; req; req=req->next) {
305 if (req->mid == mid) {
/* remember where to start next time */
311 transport->next_mid = mid+1;
/*
  timed-event callback for the transport idle function: re-arms itself
  idle.period microseconds in the future, then invokes the registered
  idle function with its private pointer
*/
315 static void idle_handler(struct event_context *ev,
316 struct timed_event *te, struct timeval t, void *private)
318 struct smbcli_transport *transport = talloc_get_type(private,
319 struct smbcli_transport);
/* schedule the next tick before running the callback */
320 struct timeval next = timeval_add(&t, 0, transport->idle.period);
321 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
324 idle_handler, transport);
/* the idle function may issue new requests on the transport */
325 transport->idle.func(transport, transport->idle.private);
/*
  setup the idle handler for a transport
  the period is in microseconds

  Records the callback/private/period on the transport, cancels any
  previously scheduled idle event, then schedules the first tick.
*/
332 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
333 void (*idle_func)(struct smbcli_transport *, void *),
337 transport->idle.func = idle_func;
338 transport->idle.private = private;
339 transport->idle.period = period;
/* replace any existing idle timer rather than stacking them */
341 if (transport->socket->event.te != NULL) {
342 talloc_free(transport->socket->event.te);
345 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
347 timeval_current_ofs(0, period),
348 idle_handler, transport);
/*
  we have a full request in our receive buffer - match it to a pending request

  Called by the packet layer with one complete NBT-framed message.
  Handles, in order: oplock break requests, readbraw replies (which may
  be any length and must match the head of the pending queue), then
  normal SMB replies matched by mid.  On a match it fills in req->in,
  decodes the DOS or NT status, verifies the signing MAC, and marks the
  request done (removing it from pending_recv).  Unmatched replies are
  discarded with a debug message; the `error:` path at the end fails the
  matched request.  Several structural lines (braces, gotos, returns)
  were dropped by the extraction.
*/
355 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob)
357 struct smbcli_transport *transport = talloc_get_type(private,
358 struct smbcli_transport);
359 uint8_t *buffer, *hdr, *vwv;
361 uint16_t wct=0, mid = 0, op = 0;
362 struct smbcli_request *req = NULL;
/* hdr points at the SMB header, just past the 4-byte NBT header */
367 hdr = buffer+NBT_HDR_SIZE;
370 /* see if it could be an oplock break request */
371 if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
376 /* at this point we need to check for a readbraw reply, as
377 these can be any length */
378 if (transport->readbraw_pending) {
379 transport->readbraw_pending = 0;
381 /* it must match the first entry in the pending queue
382 as the client is not allowed to have outstanding
   readbraw requests */
384 req = transport->pending_recv;
385 if (!req) goto error;
/* hand the raw buffer straight to the waiting request */
387 req->in.buffer = buffer;
388 talloc_steal(req, buffer);
390 req->in.allocated = req->in.size;
394 if (len >= MIN_SMB_SIZE) {
395 /* extract the mid for matching to pending requests */
396 mid = SVAL(hdr, HDR_MID);
397 wct = CVAL(hdr, HDR_WCT);
398 op = CVAL(hdr, HDR_COM);
401 /* match the incoming request against the list of pending requests */
402 for (req=transport->pending_recv; req; req=req->next) {
403 if (req->mid == mid) break;
406 /* see if it's a ntcancel reply for the current MID */
407 req = smbcli_handle_ntcancel_reply(req, len, hdr);
/* no pending request wanted this reply — drop it */
410 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
414 /* fill in the 'in' portion of the matching request */
415 req->in.buffer = buffer;
416 talloc_steal(req, buffer);
418 req->in.allocated = req->in.size;
420 /* handle NBT session replies */
421 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
422 req->status = NT_STATUS_OK;
426 /* handle non-SMB replies */
427 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
428 req->state = SMBCLI_REQUEST_ERROR;
/* the reply must be big enough to hold the advertised word count */
432 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
433 DEBUG(2,("bad reply size for mid %d\n", mid));
434 req->status = NT_STATUS_UNSUCCESSFUL;
435 req->state = SMBCLI_REQUEST_ERROR;
/* locate the data section after the vwv words and byte-count field */
442 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
443 req->in.data = req->in.vwv + VWV(wct) + 2;
444 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
445 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
446 DEBUG(3,("bad data size for mid %d\n", mid));
447 /* blergh - w2k3 gives a bogus data size values in some
   replies — clamp to what actually arrived */
449 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
452 req->in.ptr = req->in.data;
453 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
/* decode the error: old-style DOS class/code pair unless the server
   set the 32-bit (NT) error-codes flag */
455 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
456 int class = CVAL(req->in.hdr,HDR_RCLS);
457 int code = SVAL(req->in.hdr,HDR_ERR);
458 if (class == 0 && code == 0) {
459 transport->error.e.nt_status = NT_STATUS_OK;
461 transport->error.e.nt_status = NT_STATUS_DOS(class, code);
/* NT status: a raw 32-bit value in the same header field */
464 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
467 req->status = transport->error.e.nt_status;
468 if (NT_STATUS_IS_OK(req->status)) {
469 transport->error.etype = ETYPE_NONE;
471 transport->error.etype = ETYPE_SMB;
/* a bad signature is treated as a socket-level failure */
474 if (!smbcli_request_check_sign_mac(req)) {
475 transport->error.etype = ETYPE_SOCKET;
476 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
477 req->state = SMBCLI_REQUEST_ERROR;
478 req->status = NT_STATUS_ACCESS_DENIED;
483 /* if this request has an async handler then call that to
484 notify that the reply has been received. This might destroy
485 the request so it must happen last */
486 DLIST_REMOVE(transport->pending_recv, req);
487 req->state = SMBCLI_REQUEST_DONE;
/* error: fail the matched request */
495 DLIST_REMOVE(transport->pending_recv, req);
496 req->state = SMBCLI_REQUEST_ERROR;
/*
  process some read/write requests that are pending
  return false if the socket is dead

  Flushes the outgoing packet queue, then drains any bytes already
  pending on the socket.  The socket pointer is re-checked after each
  step because either operation can kill the transport.
*/
510 bool smbcli_transport_process(struct smbcli_transport *transport)
515 packet_queue_run(transport->packet);
516 if (transport->socket->sock == NULL) {
/* only call packet_recv if data is actually waiting */
520 status = socket_pending(transport->socket->sock, &npending);
521 if (NT_STATUS_IS_OK(status) && npending > 0) {
522 packet_recv(transport->packet);
524 if (transport->socket->sock == NULL) {
/*
  handle timeouts of individual smb requests: if the request is still
  waiting for its reply when the timer fires, take it off the pending
  queue and fail it with NT_STATUS_IO_TIMEOUT
*/
533 static void smbcli_timeout_handler(struct event_context *ev, struct timed_event *te,
534 struct timeval t, void *private)
536 struct smbcli_request *req = talloc_get_type(private, struct smbcli_request);
/* only requests still in RECV state can time out */
538 if (req->state == SMBCLI_REQUEST_RECV) {
539 DLIST_REMOVE(req->transport->pending_recv, req);
541 req->status = NT_STATUS_IO_TIMEOUT;
542 req->state = SMBCLI_REQUEST_ERROR;
/*
  destructor for requests: make sure a request freed while still
  awaiting its reply is unlinked from the transport's pending queue,
  so the reply matcher never touches freed memory
*/
552 static int smbcli_request_destructor(struct smbcli_request *req)
554 if (req->state == SMBCLI_REQUEST_RECV) {
555 DLIST_REMOVE(req->transport->pending_recv, req);
/*
  put a request into the send queue

  Fails fast if the transport's socket is already gone; otherwise hands
  the wire buffer to the packet layer.  One-way requests are marked done
  and destroyed immediately; requests expecting a reply go into RECV
  state on the pending queue, get an optional timeout event, and a
  destructor that unlinks them if freed early.
*/
564 void smbcli_transport_send(struct smbcli_request *req)
569 /* check if the transport is dead */
570 if (req->transport->socket->sock == NULL) {
571 req->state = SMBCLI_REQUEST_ERROR;
572 req->status = NT_STATUS_NET_WRITE_FAULT;
/* queue the fully-built wire buffer for transmission */
576 blob = data_blob_const(req->out.buffer, req->out.size);
577 status = packet_send(req->transport->packet, blob);
578 if (!NT_STATUS_IS_OK(status)) {
579 req->state = SMBCLI_REQUEST_ERROR;
580 req->status = status;
/* one-way requests (no reply expected) are finished once queued */
584 if (req->one_way_request) {
585 req->state = SMBCLI_REQUEST_DONE;
586 smbcli_request_destroy(req);
/* wait for a matching reply via smbcli_transport_finish_recv() */
590 req->state = SMBCLI_REQUEST_RECV;
591 DLIST_ADD(req->transport->pending_recv, req);
/* arm the per-request timeout (request_timeout is in seconds) */
594 if (req->transport->options.request_timeout) {
595 event_add_timed(req->transport->socket->event.ctx, req,
596 timeval_current_ofs(req->transport->options.request_timeout, 0),
597 smbcli_timeout_handler, req);
600 talloc_set_destructor(req, smbcli_request_destructor);
/****************************************************************************
 Send an SMBecho (async send)

 Builds an SMBecho request with one vwv word (the repeat count) and the
 caller-supplied payload, and queues it.  Returns NULL on setup or send
 failure.
*****************************************************************************/
607 struct smbcli_request *smb_raw_echo_send(struct smbcli_transport *transport,
610 struct smbcli_request *req;
612 req = smbcli_request_setup_transport(transport, SMBecho, 1, p->in.size);
613 if (!req) return NULL;
/* vwv0 = number of times the server should echo the data back */
615 SSVAL(req->out.vwv, VWV(0), p->in.repeat_count);
617 memcpy(req->out.data, p->in.data, p->in.size);
621 if (!smbcli_request_send(req)) {
622 smbcli_request_destroy(req);
/****************************************************************************
 raw echo interface (async recv)

 Receives one echo reply: extracts the sequence number and payload
 (allocated on mem_ctx, replacing any previous out.data).  The request
 is destroyed only once the expected number of replies has arrived
 (out.count == in.repeat_count); the control flow between those two
 destroy points spans lines dropped by the extraction.
****************************************************************************/
632 NTSTATUS smb_raw_echo_recv(struct smbcli_request *req, TALLOC_CTX *mem_ctx,
635 if (!smbcli_request_receive(req) ||
636 smbcli_request_is_error(req)) {
/* an echo reply carries exactly one vwv word */
640 SMBCLI_CHECK_WCT(req, 1);
642 p->out.sequence_number = SVAL(req->in.vwv, VWV(0));
643 p->out.size = req->in.data_size;
/* replace any payload from a previous reply in this sequence */
644 talloc_free(p->out.data);
645 p->out.data = talloc_array(mem_ctx, uint8_t, p->out.size);
646 NT_STATUS_HAVE_NO_MEMORY(p->out.data);
648 if (!smbcli_raw_pull_data(req, req->in.data, p->out.size, p->out.data)) {
649 req->status = NT_STATUS_BUFFER_TOO_SMALL;
/* all expected replies received — finish the request */
652 if (p->out.count == p->in.repeat_count) {
653 return smbcli_request_destroy(req);
659 return smbcli_request_destroy(req);
/****************************************************************************
 Send a echo (sync interface)

 Thin synchronous wrapper: async send followed by the simple receive
 (smbcli_request_simple_recv copes with a NULL req from a failed send).
*****************************************************************************/
665 NTSTATUS smb_raw_echo(struct smbcli_transport *transport, struct smb_echo *p)
667 struct smbcli_request *req = smb_raw_echo_send(transport, p);
668 return smbcli_request_simple_recv(req);