/*
   Unix SMB/CIFS implementation.
   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>

#include "libcli/raw/libcliraw.h"
#include "lib/socket/socket.h"
#include "lib/util/dlinklist.h"
#include "lib/events/events.h"
#include "lib/stream/packet.h"
#include "librpc/gen_ndr/ndr_nbt.h"
#include "param/param.h"
33 an event has happened on the socket
35 static void smbcli_transport_event_handler(struct event_context *ev,
37 uint16_t flags, void *private)
39 struct smbcli_transport *transport = talloc_get_type(private,
40 struct smbcli_transport);
41 if (flags & EVENT_FD_READ) {
42 packet_recv(transport->packet);
45 if (flags & EVENT_FD_WRITE) {
46 packet_queue_run(transport->packet);
53 static int transport_destructor(struct smbcli_transport *transport)
55 smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
63 static void smbcli_transport_error(void *private, NTSTATUS status)
65 struct smbcli_transport *transport = talloc_get_type(private, struct smbcli_transport);
66 smbcli_transport_dead(transport, status);
69 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob);
72 create a transport structure based on an established socket
74 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
75 TALLOC_CTX *parent_ctx,
81 struct smbcli_transport *transport;
83 transport = talloc_zero(parent_ctx, struct smbcli_transport);
84 if (!transport) return NULL;
87 transport->socket = talloc_steal(transport, sock);
89 transport->socket = talloc_reference(transport, sock);
91 transport->negotiate.protocol = PROTOCOL_NT1;
92 transport->options.use_spnego = use_spnego;
93 transport->options.max_xmit = max_xmit;
94 transport->options.max_mux = max_mux;
95 transport->options.request_timeout = SMB_REQUEST_TIMEOUT;
97 transport->negotiate.max_xmit = transport->options.max_xmit;
99 /* setup the stream -> packet parser */
100 transport->packet = packet_init(transport);
101 if (transport->packet == NULL) {
102 talloc_free(transport);
105 packet_set_private(transport->packet, transport);
106 packet_set_socket(transport->packet, transport->socket->sock);
107 packet_set_callback(transport->packet, smbcli_transport_finish_recv);
108 packet_set_full_request(transport->packet, packet_full_request_nbt);
109 packet_set_error_handler(transport->packet, smbcli_transport_error);
110 packet_set_event_context(transport->packet, transport->socket->event.ctx);
111 packet_set_nofree(transport->packet);
113 smbcli_init_signing(transport);
115 ZERO_STRUCT(transport->called);
117 /* take over event handling from the socket layer - it only
118 handles events up until we are connected */
119 talloc_free(transport->socket->event.fde);
120 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
121 transport->socket->sock,
122 socket_get_fd(transport->socket->sock),
124 smbcli_transport_event_handler,
127 packet_set_fde(transport->packet, transport->socket->event.fde);
128 packet_set_serialise(transport->packet);
129 talloc_set_destructor(transport, transport_destructor);
135 mark the transport as dead
137 void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
139 smbcli_sock_dead(transport->socket);
141 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
142 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
145 /* kill only the first pending receive - this is so that if
146 that async function frees the connection we don't die trying
147 to use old memory. The caller has to cope with only one
149 if (transport->pending_recv) {
150 struct smbcli_request *req = transport->pending_recv;
151 req->state = SMBCLI_REQUEST_ERROR;
152 req->status = status;
153 DLIST_REMOVE(transport->pending_recv, req);
162 send a session request
164 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
165 struct nbt_name *calling,
166 struct nbt_name *called)
169 struct smbcli_request *req;
170 DATA_BLOB calling_blob, called_blob;
171 TALLOC_CTX *tmp_ctx = talloc_new(transport);
174 status = nbt_name_dup(transport, called, &transport->called);
175 if (!NT_STATUS_IS_OK(status)) goto failed;
177 status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
178 if (!NT_STATUS_IS_OK(status)) goto failed;
180 status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
181 if (!NT_STATUS_IS_OK(status)) goto failed;
183 /* allocate output buffer */
184 req = smbcli_request_setup_nonsmb(transport,
186 calling_blob.length + called_blob.length);
187 if (req == NULL) goto failed;
189 /* put in the destination name */
190 p = req->out.buffer + NBT_HDR_SIZE;
191 memcpy(p, called_blob.data, called_blob.length);
192 p += called_blob.length;
194 memcpy(p, calling_blob.data, calling_blob.length);
195 p += calling_blob.length;
197 _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
198 SCVAL(req->out.buffer,0,0x81);
200 if (!smbcli_request_send(req)) {
201 smbcli_request_destroy(req);
205 talloc_free(tmp_ctx);
209 talloc_free(tmp_ctx);
214 map a session request error to a NTSTATUS
216 static NTSTATUS map_session_refused_error(uint8_t error)
221 return NT_STATUS_REMOTE_NOT_LISTENING;
223 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
225 return NT_STATUS_REMOTE_RESOURCES;
227 return NT_STATUS_UNEXPECTED_IO_ERROR;
232 finish a smbcli_transport_connect()
234 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
238 if (!smbcli_request_receive(req)) {
239 smbcli_request_destroy(req);
240 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
243 switch (CVAL(req->in.buffer,0)) {
245 status = NT_STATUS_OK;
248 status = map_session_refused_error(CVAL(req->in.buffer,4));
251 DEBUG(1,("Warning: session retarget not supported\n"));
252 status = NT_STATUS_NOT_SUPPORTED;
255 status = NT_STATUS_UNEXPECTED_IO_ERROR;
259 smbcli_request_destroy(req);
265 send a session request (if needed)
267 bool smbcli_transport_connect(struct smbcli_transport *transport,
268 struct nbt_name *calling,
269 struct nbt_name *called)
271 struct smbcli_request *req;
274 if (transport->socket->port == 445) {
278 req = smbcli_transport_connect_send(transport,
280 status = smbcli_transport_connect_recv(req);
281 return NT_STATUS_IS_OK(status);
284 /****************************************************************************
285 get next mid in sequence
286 ****************************************************************************/
287 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
290 struct smbcli_request *req;
292 mid = transport->next_mid;
295 /* now check to see if this mid is being used by one of the
296 pending requests. This is quite efficient because the list is
297 usually very short */
299 /* the zero mid is reserved for requests that don't have a mid */
300 if (mid == 0) mid = 1;
302 for (req=transport->pending_recv; req; req=req->next) {
303 if (req->mid == mid) {
309 transport->next_mid = mid+1;
313 static void idle_handler(struct event_context *ev,
314 struct timed_event *te, struct timeval t, void *private)
316 struct smbcli_transport *transport = talloc_get_type(private,
317 struct smbcli_transport);
318 struct timeval next = timeval_add(&t, 0, transport->idle.period);
319 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
322 idle_handler, transport);
323 transport->idle.func(transport, transport->idle.private);
327 setup the idle handler for a transport
328 the period is in microseconds
330 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
331 void (*idle_func)(struct smbcli_transport *, void *),
335 transport->idle.func = idle_func;
336 transport->idle.private = private;
337 transport->idle.period = period;
339 if (transport->socket->event.te != NULL) {
340 talloc_free(transport->socket->event.te);
343 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
345 timeval_current_ofs(0, period),
346 idle_handler, transport);
350 we have a full request in our receive buffer - match it to a pending request
353 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob)
355 struct smbcli_transport *transport = talloc_get_type(private,
356 struct smbcli_transport);
357 uint8_t *buffer, *hdr, *vwv;
359 uint16_t wct=0, mid = 0, op = 0;
360 struct smbcli_request *req = NULL;
365 hdr = buffer+NBT_HDR_SIZE;
368 /* see if it could be an oplock break request */
369 if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
374 /* at this point we need to check for a readbraw reply, as
375 these can be any length */
376 if (transport->readbraw_pending) {
377 transport->readbraw_pending = 0;
379 /* it must match the first entry in the pending queue
380 as the client is not allowed to have outstanding
382 req = transport->pending_recv;
383 if (!req) goto error;
385 req->in.buffer = buffer;
386 talloc_steal(req, buffer);
388 req->in.allocated = req->in.size;
392 if (len >= MIN_SMB_SIZE) {
393 /* extract the mid for matching to pending requests */
394 mid = SVAL(hdr, HDR_MID);
395 wct = CVAL(hdr, HDR_WCT);
396 op = CVAL(hdr, HDR_COM);
399 /* match the incoming request against the list of pending requests */
400 for (req=transport->pending_recv; req; req=req->next) {
401 if (req->mid == mid) break;
404 /* see if it's a ntcancel reply for the current MID */
405 req = smbcli_handle_ntcancel_reply(req, len, hdr);
408 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
412 /* fill in the 'in' portion of the matching request */
413 req->in.buffer = buffer;
414 talloc_steal(req, buffer);
416 req->in.allocated = req->in.size;
418 /* handle NBT session replies */
419 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
420 req->status = NT_STATUS_OK;
424 /* handle non-SMB replies */
425 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
426 req->state = SMBCLI_REQUEST_ERROR;
430 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
431 DEBUG(2,("bad reply size for mid %d\n", mid));
432 req->status = NT_STATUS_UNSUCCESSFUL;
433 req->state = SMBCLI_REQUEST_ERROR;
440 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
441 req->in.data = req->in.vwv + VWV(wct) + 2;
442 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
443 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
444 DEBUG(3,("bad data size for mid %d\n", mid));
445 /* blergh - w2k3 gives a bogus data size values in some
447 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
450 req->in.ptr = req->in.data;
451 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
453 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
454 int class = CVAL(req->in.hdr,HDR_RCLS);
455 int code = SVAL(req->in.hdr,HDR_ERR);
456 if (class == 0 && code == 0) {
457 transport->error.e.nt_status = NT_STATUS_OK;
459 transport->error.e.nt_status = NT_STATUS_DOS(class, code);
462 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
465 req->status = transport->error.e.nt_status;
466 if (NT_STATUS_IS_OK(req->status)) {
467 transport->error.etype = ETYPE_NONE;
469 transport->error.etype = ETYPE_SMB;
472 if (!smbcli_request_check_sign_mac(req)) {
473 transport->error.etype = ETYPE_SOCKET;
474 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
475 req->state = SMBCLI_REQUEST_ERROR;
476 req->status = NT_STATUS_ACCESS_DENIED;
481 /* if this request has an async handler then call that to
482 notify that the reply has been received. This might destroy
483 the request so it must happen last */
484 DLIST_REMOVE(transport->pending_recv, req);
485 req->state = SMBCLI_REQUEST_DONE;
493 DLIST_REMOVE(transport->pending_recv, req);
494 req->state = SMBCLI_REQUEST_ERROR;
505 process some read/write requests that are pending
506 return false if the socket is dead
508 bool smbcli_transport_process(struct smbcli_transport *transport)
513 packet_queue_run(transport->packet);
514 if (transport->socket->sock == NULL) {
518 status = socket_pending(transport->socket->sock, &npending);
519 if (NT_STATUS_IS_OK(status) && npending > 0) {
520 packet_recv(transport->packet);
522 if (transport->socket->sock == NULL) {
529 handle timeouts of individual smb requests
531 static void smbcli_timeout_handler(struct event_context *ev, struct timed_event *te,
532 struct timeval t, void *private)
534 struct smbcli_request *req = talloc_get_type(private, struct smbcli_request);
536 if (req->state == SMBCLI_REQUEST_RECV) {
537 DLIST_REMOVE(req->transport->pending_recv, req);
539 req->status = NT_STATUS_IO_TIMEOUT;
540 req->state = SMBCLI_REQUEST_ERROR;
550 static int smbcli_request_destructor(struct smbcli_request *req)
552 if (req->state == SMBCLI_REQUEST_RECV) {
553 DLIST_REMOVE(req->transport->pending_recv, req);
560 put a request into the send queue
562 void smbcli_transport_send(struct smbcli_request *req)
567 /* check if the transport is dead */
568 if (req->transport->socket->sock == NULL) {
569 req->state = SMBCLI_REQUEST_ERROR;
570 req->status = NT_STATUS_NET_WRITE_FAULT;
574 blob = data_blob_const(req->out.buffer, req->out.size);
575 status = packet_send(req->transport->packet, blob);
576 if (!NT_STATUS_IS_OK(status)) {
577 req->state = SMBCLI_REQUEST_ERROR;
578 req->status = status;
582 if (req->one_way_request) {
583 req->state = SMBCLI_REQUEST_DONE;
584 smbcli_request_destroy(req);
588 req->state = SMBCLI_REQUEST_RECV;
589 DLIST_ADD(req->transport->pending_recv, req);
592 if (req->transport->options.request_timeout) {
593 event_add_timed(req->transport->socket->event.ctx, req,
594 timeval_current_ofs(req->transport->options.request_timeout, 0),
595 smbcli_timeout_handler, req);
598 talloc_set_destructor(req, smbcli_request_destructor);
602 /****************************************************************************
603 Send an SMBecho (async send)
604 *****************************************************************************/
605 struct smbcli_request *smb_raw_echo_send(struct smbcli_transport *transport,
608 struct smbcli_request *req;
610 req = smbcli_request_setup_transport(transport, SMBecho, 1, p->in.size);
611 if (!req) return NULL;
613 SSVAL(req->out.vwv, VWV(0), p->in.repeat_count);
615 memcpy(req->out.data, p->in.data, p->in.size);
619 if (!smbcli_request_send(req)) {
620 smbcli_request_destroy(req);
627 /****************************************************************************
628 raw echo interface (async recv)
629 ****************************************************************************/
630 NTSTATUS smb_raw_echo_recv(struct smbcli_request *req, TALLOC_CTX *mem_ctx,
633 if (!smbcli_request_receive(req) ||
634 smbcli_request_is_error(req)) {
638 SMBCLI_CHECK_WCT(req, 1);
640 p->out.sequence_number = SVAL(req->in.vwv, VWV(0));
641 p->out.size = req->in.data_size;
642 talloc_free(p->out.data);
643 p->out.data = talloc_array(mem_ctx, uint8_t, p->out.size);
644 NT_STATUS_HAVE_NO_MEMORY(p->out.data);
646 if (!smbcli_raw_pull_data(req, req->in.data, p->out.size, p->out.data)) {
647 req->status = NT_STATUS_BUFFER_TOO_SMALL;
650 if (p->out.count == p->in.repeat_count) {
651 return smbcli_request_destroy(req);
657 return smbcli_request_destroy(req);
660 /****************************************************************************
661 Send a echo (sync interface)
662 *****************************************************************************/
663 NTSTATUS smb_raw_echo(struct smbcli_transport *transport, struct smb_echo *p)
665 struct smbcli_request *req = smb_raw_echo_send(transport, p);
666 return smbcli_request_simple_recv(req);