2 Unix SMB/CIFS implementation.
3 SMB client transport context management functions
5 Copyright (C) Andrew Tridgell 1994-2005
6 Copyright (C) James Myers 2003 <myersjj@samba.org>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "libcli/raw/libcliraw.h"
24 #include "lib/socket/socket.h"
25 #include "lib/util/dlinklist.h"
26 #include "lib/events/events.h"
27 #include "lib/stream/packet.h"
28 #include "librpc/gen_ndr/ndr_nbt.h"
29 #include "param/param.h"
/*
  fd event callback for the transport socket: dispatches read/write
  readiness to the packet engine owned by this transport.  Registered
  via event_add_fd() in smbcli_transport_init() below.
  NOTE(review): this extract is lossy - the original comment delimiters,
  the fd_event parameter and the closing braces are not visible here.
*/
33 an event has happened on the socket
35 static void smbcli_transport_event_handler(struct event_context *ev,
37 uint16_t flags, void *private)
39 struct smbcli_transport *transport = talloc_get_type(private,
40 struct smbcli_transport);
	/* socket readable: let the packet layer pull incoming data */
41 if (flags & EVENT_FD_READ) {
42 packet_recv(transport->packet);
	/* socket writable: flush any queued outgoing packets */
45 if (flags & EVENT_FD_WRITE) {
46 packet_queue_run(transport->packet);
/*
  talloc destructor for a smbcli_transport: on free, mark the connection
  dead with a local-disconnect status so any pending receive is failed
  (see smbcli_transport_dead()).
  NOTE(review): the destructor's "return 0;" and closing brace are not
  visible in this extract.
*/
53 static int transport_destructor(struct smbcli_transport *transport)
55 smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
/*
  error callback handed to the packet layer (packet_set_error_handler()):
  any stream-level error kills the whole transport.
*/
63 static void smbcli_transport_error(void *private, NTSTATUS status)
65 struct smbcli_transport *transport = talloc_get_type(private, struct smbcli_transport);
66 smbcli_transport_dead(transport, status);

/* forward declaration: full-packet callback installed via
   packet_set_callback() in smbcli_transport_init() */
69 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob);
/*
  create a transport structure based on an established socket

  Allocates the transport off parent_ctx, takes ownership of (or a
  reference to) the socket, seeds negotiation defaults from loadparm,
  wires up the NBT stream->packet parser, and takes over fd event
  handling from the socket layer.
  NOTE(review): this extract is lossy - additional parameters, several
  closing braces, "return NULL" after the packet_init failure, and the
  final "return transport;" are not visible here.
*/
74 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
75 TALLOC_CTX *parent_ctx,
78 struct smbcli_transport *transport;
80 transport = talloc_zero(parent_ctx, struct smbcli_transport);
81 if (!transport) return NULL;
	/* presumably these two assignments are the opposite arms of a
	   primary/secondary ownership branch (talloc_steal vs
	   talloc_reference) whose if/else lines were lost in extraction -
	   TODO confirm against the upstream file */
84 transport->socket = talloc_steal(transport, sock);
86 transport->socket = talloc_reference(transport, sock);
	/* default negotiation state and options taken from loadparm */
88 transport->negotiate.protocol = PROTOCOL_NT1;
89 transport->options.use_spnego = lp_use_spnego(global_loadparm) &&
90 lp_nt_status_support(global_loadparm);
91 transport->options.max_xmit = lp_max_xmit(global_loadparm);
92 transport->options.max_mux = lp_maxmux(global_loadparm);
93 transport->options.request_timeout = SMB_REQUEST_TIMEOUT;
95 transport->negotiate.max_xmit = transport->options.max_xmit;
97 /* setup the stream -> packet parser */
98 transport->packet = packet_init(transport);
99 if (transport->packet == NULL) {
100 talloc_free(transport);
	/* packet layer callbacks: private data, socket, completion,
	   NBT framing, error handling and event context */
103 packet_set_private(transport->packet, transport);
104 packet_set_socket(transport->packet, transport->socket->sock);
105 packet_set_callback(transport->packet, smbcli_transport_finish_recv);
106 packet_set_full_request(transport->packet, packet_full_request_nbt);
107 packet_set_error_handler(transport->packet, smbcli_transport_error);
108 packet_set_event_context(transport->packet, transport->socket->event.ctx);
109 packet_set_nofree(transport->packet);
111 smbcli_init_signing(transport);
113 ZERO_STRUCT(transport->called);
115 /* take over event handling from the socket layer - it only
116 handles events up until we are connected */
117 talloc_free(transport->socket->event.fde);
118 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
119 transport->socket->sock,
120 socket_get_fd(transport->socket->sock),
122 smbcli_transport_event_handler,
125 packet_set_fde(transport->packet, transport->socket->event.fde);
126 packet_set_serialise(transport->packet);
127 talloc_set_destructor(transport, transport_destructor);
/*
  mark the transport as dead

  Tears down the underlying socket, then fails at most ONE pending
  receive with the given status - see the comment below for why only
  the first one is failed.
  NOTE(review): comment delimiters and closing braces are missing from
  this extract.
*/
135 void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
137 smbcli_sock_dead(transport->socket);
	/* map the generic failure code to a clearer network error */
139 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
140 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
143 /* kill only the first pending receive - this is so that if
144 that async function frees the connection we don't die trying
145 to use old memory. The caller has to cope with only one
147 if (transport->pending_recv) {
148 struct smbcli_request *req = transport->pending_recv;
149 req->state = SMBCLI_REQUEST_ERROR;
150 req->status = status;
151 DLIST_REMOVE(transport->pending_recv, req);
/*
  send a session request

  Builds and sends an NBT session request (message type 0x81) carrying
  the encoded called/calling NetBIOS names, returning the async request
  or NULL on failure.
  NOTE(review): this extract is lossy - the NTSTATUS/p declarations,
  the "failed:" label and the return statements are not visible here.
*/
162 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
163 struct nbt_name *calling,
164 struct nbt_name *called)
167 struct smbcli_request *req;
168 DATA_BLOB calling_blob, called_blob;
169 TALLOC_CTX *tmp_ctx = talloc_new(transport);
	/* keep a copy of the called name on the transport itself */
172 status = nbt_name_dup(transport, called, &transport->called);
173 if (!NT_STATUS_IS_OK(status)) goto failed;
	/* wire-encode both NetBIOS names into temporary blobs */
175 status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
176 if (!NT_STATUS_IS_OK(status)) goto failed;
178 status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
179 if (!NT_STATUS_IS_OK(status)) goto failed;
181 /* allocate output buffer */
182 req = smbcli_request_setup_nonsmb(transport,
184 calling_blob.length + called_blob.length);
185 if (req == NULL) goto failed;
187 /* put in the destination name */
188 p = req->out.buffer + NBT_HDR_SIZE;
189 memcpy(p, called_blob.data, called_blob.length);
190 p += called_blob.length;
	/* then the calling name */
192 memcpy(p, calling_blob.data, calling_blob.length);
193 p += calling_blob.length;
	/* fix up the NBT length field and stamp the 0x81 session
	   request message type into the first header byte */
195 _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
196 SCVAL(req->out.buffer,0,0x81);
198 if (!smbcli_request_send(req)) {
199 smbcli_request_destroy(req);
203 talloc_free(tmp_ctx);
	/* failure path: free temporaries (return value line elided) */
207 talloc_free(tmp_ctx);
/*
  map a session request error to a NTSTATUS

  Translates the one-byte error code from an NBT negative session
  response into an NTSTATUS.
  NOTE(review): the switch statement and its case labels are missing
  from this extract - only the mapped return values survive; confirm
  which error codes map to which status against the upstream file.
*/
214 static NTSTATUS map_session_refused_error(uint8_t error)
219 return NT_STATUS_REMOTE_NOT_LISTENING;
221 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
223 return NT_STATUS_REMOTE_RESOURCES;
	/* default: unrecognised error code */
225 return NT_STATUS_UNEXPECTED_IO_ERROR;
/*
  finish a smbcli_transport_connect()

  Receives the NBT session reply, switches on the message type in the
  first byte and maps it to an NTSTATUS.  The request is destroyed on
  every path.
  NOTE(review): the case labels of the switch (positive / negative /
  retarget session response) and the final "return status;" are missing
  from this extract.
*/
232 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
236 if (!smbcli_request_receive(req)) {
237 smbcli_request_destroy(req);
238 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
241 switch (CVAL(req->in.buffer,0)) {
	/* (elided case) positive session response */
243 status = NT_STATUS_OK;
	/* (elided case) negative session response: map the one-byte reason */
246 status = map_session_refused_error(CVAL(req->in.buffer,4));
	/* (elided case) session retarget response - not implemented */
249 DEBUG(1,("Warning: session retarget not supported\n"));
250 status = NT_STATUS_NOT_SUPPORTED;
	/* (elided default) anything else is a protocol error */
253 status = NT_STATUS_UNEXPECTED_IO_ERROR;
257 smbcli_request_destroy(req);
/*
  send a session request (if needed)

  Synchronous wrapper: NBT session requests are only needed on the
  NetBIOS session port; port 445 (raw SMB) skips them.
  NOTE(review): the body of the port-445 branch (presumably
  "return true;"), some arguments to connect_send and closing braces
  are missing from this extract.
*/
265 bool smbcli_transport_connect(struct smbcli_transport *transport,
266 struct nbt_name *calling,
267 struct nbt_name *called)
269 struct smbcli_request *req;
	/* port 445 carries SMB directly - no NBT session request needed */
272 if (transport->socket->port == 445) {
276 req = smbcli_transport_connect_send(transport,
278 status = smbcli_transport_connect_recv(req);
279 return NT_STATUS_IS_OK(status);
282 /****************************************************************************
283  get next mid in sequence
284 ****************************************************************************/
/*
  Returns the next multiplex id, skipping mid 0 and any mid already in
  use by a pending request.
  NOTE(review): the mid declaration, the retry label/jump taken when a
  collision is found in the loop, and "return mid;" are missing from
  this extract.
*/
285 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
288 struct smbcli_request *req;
290 mid = transport->next_mid;
293 /* now check to see if this mid is being used by one of the
294 pending requests. This is quite efficient because the list is
295 usually very short */
297 /* the zero mid is reserved for requests that don't have a mid */
298 if (mid == 0) mid = 1;
300 for (req=transport->pending_recv; req; req=req->next) {
301 if (req->mid == mid) {
	/* store the successor so the next call starts past this mid */
307 transport->next_mid = mid+1;
/*
  periodic timer callback: re-arms itself for one idle-period later and
  then invokes the user-supplied idle function.  Installed by
  smbcli_transport_idle_handler() below.
  NOTE(review): some event_add_timed() arguments and the closing brace
  are missing from this extract.
*/
311 static void idle_handler(struct event_context *ev,
312 struct timed_event *te, struct timeval t, void *private)
314 struct smbcli_transport *transport = talloc_get_type(private,
315 struct smbcli_transport);
	/* schedule the next tick before running the callback */
316 struct timeval next = timeval_add(&t, 0, transport->idle.period);
317 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
320 idle_handler, transport);
321 transport->idle.func(transport, transport->idle.private);
/*
  setup the idle handler for a transport
  the period is in microseconds

  Records the callback/private/period on the transport, replaces any
  existing idle timer, and arms the first tick.
  NOTE(review): the remaining parameter lines (private, period), some
  event_add_timed() arguments and closing braces are missing from this
  extract.
*/
328 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
329 void (*idle_func)(struct smbcli_transport *, void *),
333 transport->idle.func = idle_func;
334 transport->idle.private = private;
335 transport->idle.period = period;
	/* cancel any previously-installed idle timer */
337 if (transport->socket->event.te != NULL) {
338 talloc_free(transport->socket->event.te);
341 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
343 timeval_current_ofs(0, period),
344 idle_handler, transport);
/*
  we have a full request in our receive buffer - match it to a pending request

  Full-packet callback from the packet layer.  Handles, in order:
  oplock break requests, readbraw replies, mid-matching against the
  pending-receive list (including ntcancel replies), NBT session
  replies, malformed/short SMB replies, error-code extraction (DOS vs
  32-bit NT status), signature checking, and finally completion of the
  matched request.
  NOTE(review): this extract is lossy - buffer/len initialisation, the
  async-callback invocation, several gotos/labels, return statements
  and closing braces are missing.  The statement order here is
  load-bearing; do not reorder.
*/
351 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob)
353 struct smbcli_transport *transport = talloc_get_type(private,
354 struct smbcli_transport);
355 uint8_t *buffer, *hdr, *vwv;
357 uint16_t wct=0, mid = 0, op = 0;
358 struct smbcli_request *req = NULL;
	/* hdr points past the 4-byte NBT header */
363 hdr = buffer+NBT_HDR_SIZE;
366 /* see if it could be an oplock break request */
367 if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
372 /* at this point we need to check for a readbraw reply, as
373 these can be any length */
374 if (transport->readbraw_pending) {
375 transport->readbraw_pending = 0;
377 /* it must match the first entry in the pending queue
378 as the client is not allowed to have outstanding
380 req = transport->pending_recv;
381 if (!req) goto error;
	/* hand the raw buffer straight to the request */
383 req->in.buffer = buffer;
384 talloc_steal(req, buffer);
386 req->in.allocated = req->in.size;
	/* only parse the SMB header fields if there is one */
390 if (len >= MIN_SMB_SIZE) {
391 /* extract the mid for matching to pending requests */
392 mid = SVAL(hdr, HDR_MID);
393 wct = CVAL(hdr, HDR_WCT);
394 op = CVAL(hdr, HDR_COM);
397 /* match the incoming request against the list of pending requests */
398 for (req=transport->pending_recv; req; req=req->next) {
399 if (req->mid == mid) break;
402 /* see if it's a ntcancel reply for the current MID */
403 req = smbcli_handle_ntcancel_reply(req, len, hdr);
	/* no pending request matched - drop the reply */
406 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
410 /* fill in the 'in' portion of the matching request */
411 req->in.buffer = buffer;
412 talloc_steal(req, buffer);
414 req->in.allocated = req->in.size;
416 /* handle NBT session replies */
417 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
418 req->status = NT_STATUS_OK;
422 /* handle non-SMB replies */
423 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
424 req->state = SMBCLI_REQUEST_ERROR;
	/* reply too short to contain the advertised word count */
428 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
429 DEBUG(2,("bad reply size for mid %d\n", mid));
430 req->status = NT_STATUS_UNSUCCESSFUL;
431 req->state = SMBCLI_REQUEST_ERROR;
	/* locate the data section following the vwv block */
438 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
439 req->in.data = req->in.vwv + VWV(wct) + 2;
440 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
441 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
442 DEBUG(3,("bad data size for mid %d\n", mid));
443 /* blergh - w2k3 gives a bogus data size values in some
445 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
448 req->in.ptr = req->in.data;
449 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
	/* extract the error status: old-style DOS class/code pair, or a
	   32-bit NT status, depending on FLAGS2 */
451 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
452 int class = CVAL(req->in.hdr,HDR_RCLS);
453 int code = SVAL(req->in.hdr,HDR_ERR);
454 if (class == 0 && code == 0) {
455 transport->error.e.nt_status = NT_STATUS_OK;
457 transport->error.e.nt_status = NT_STATUS_DOS(class, code);
460 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
463 req->status = transport->error.e.nt_status;
464 if (NT_STATUS_IS_OK(req->status)) {
465 transport->error.etype = ETYPE_NONE;
467 transport->error.etype = ETYPE_SMB;
	/* verify the SMB signature before trusting the reply */
470 if (!smbcli_request_check_sign_mac(req)) {
471 transport->error.etype = ETYPE_SOCKET;
472 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
473 req->state = SMBCLI_REQUEST_ERROR;
474 req->status = NT_STATUS_ACCESS_DENIED;
479 /* if this request has an async handler then call that to
480 notify that the reply has been received. This might destroy
481 the request so it must happen last */
482 DLIST_REMOVE(transport->pending_recv, req);
483 req->state = SMBCLI_REQUEST_DONE;
	/* (elided error label) fail the request that was being filled in */
491 DLIST_REMOVE(transport->pending_recv, req);
492 req->state = SMBCLI_REQUEST_ERROR;
/*
  process some read/write requests that are pending
  return false if the socket is dead

  Runs the outgoing packet queue, then drains any bytes already pending
  on the socket; after each step a NULL socket pointer means the
  transport died during processing.
  NOTE(review): local declarations (status, npending), the "return
  false" bodies of the two NULL checks, the final "return true" and
  closing braces are missing from this extract.
*/
506 bool smbcli_transport_process(struct smbcli_transport *transport)
	/* flush queued sends first */
511 packet_queue_run(transport->packet);
512 if (transport->socket->sock == NULL) {
	/* then pull any data already waiting on the socket */
516 status = socket_pending(transport->socket->sock, &npending);
517 if (NT_STATUS_IS_OK(status) && npending > 0) {
518 packet_recv(transport->packet);
520 if (transport->socket->sock == NULL) {
/*
  handle timeouts of individual smb requests

  Timer callback armed in smbcli_transport_send(): if the request is
  still waiting for its reply, remove it from the pending list and fail
  it with NT_STATUS_IO_TIMEOUT.
  NOTE(review): the async-callback notification after the state change
  and the closing braces are missing from this extract.
*/
529 static void smbcli_timeout_handler(struct event_context *ev, struct timed_event *te,
530 struct timeval t, void *private)
532 struct smbcli_request *req = talloc_get_type(private, struct smbcli_request);
534 if (req->state == SMBCLI_REQUEST_RECV) {
535 DLIST_REMOVE(req->transport->pending_recv, req);
537 req->status = NT_STATUS_IO_TIMEOUT;
538 req->state = SMBCLI_REQUEST_ERROR;
/*
  talloc destructor for a smbcli_request: if the request is freed while
  still awaiting a reply, unlink it from the transport's pending-receive
  list so the transport never touches freed memory.
  NOTE(review): "return 0;" and the closing brace are missing from this
  extract.
*/
548 static int smbcli_request_destructor(struct smbcli_request *req)
550 if (req->state == SMBCLI_REQUEST_RECV) {
551 DLIST_REMOVE(req->transport->pending_recv, req);
/*
  put a request into the send queue

  Hands the request's output buffer to the packet layer.  One-way
  requests are completed and destroyed immediately; otherwise the
  request joins the pending-receive list, gets an optional timeout
  timer, and a destructor that unlinks it if freed early.
  NOTE(review): local declarations (blob, status), early returns after
  the error paths and closing braces are missing from this extract.
*/
560 void smbcli_transport_send(struct smbcli_request *req)
565 /* check if the transport is dead */
566 if (req->transport->socket->sock == NULL) {
567 req->state = SMBCLI_REQUEST_ERROR;
568 req->status = NT_STATUS_NET_WRITE_FAULT;
	/* queue the wire bytes with the packet layer */
572 blob = data_blob_const(req->out.buffer, req->out.size);
573 status = packet_send(req->transport->packet, blob);
574 if (!NT_STATUS_IS_OK(status)) {
575 req->state = SMBCLI_REQUEST_ERROR;
576 req->status = status;
	/* one-way requests have no reply to wait for */
580 if (req->one_way_request) {
581 req->state = SMBCLI_REQUEST_DONE;
582 smbcli_request_destroy(req);
586 req->state = SMBCLI_REQUEST_RECV;
587 DLIST_ADD(req->transport->pending_recv, req);
	/* arm the per-request timeout (seconds), if configured; the timer
	   is parented to req so it dies with the request */
590 if (req->transport->options.request_timeout) {
591 event_add_timed(req->transport->socket->event.ctx, req,
592 timeval_current_ofs(req->transport->options.request_timeout, 0),
593 smbcli_timeout_handler, req);
596 talloc_set_destructor(req, smbcli_request_destructor);
600 /****************************************************************************
601  Send an SMBecho (async send)
602 *****************************************************************************/
/*
  Builds an SMBecho request (1 vwv word: repeat count) carrying p->in
  data and sends it.
  NOTE(review): the struct smb_echo *p parameter line, the success
  "return req;", "return NULL;" after the failed send, and closing
  braces are missing from this extract.
*/
603 struct smbcli_request *smb_raw_echo_send(struct smbcli_transport *transport,
606 struct smbcli_request *req;
608 req = smbcli_request_setup_transport(transport, SMBecho, 1, p->in.size);
609 if (!req) return NULL;
	/* vwv[0] = number of echo replies requested */
611 SSVAL(req->out.vwv, VWV(0), p->in.repeat_count);
613 memcpy(req->out.data, p->in.data, p->in.size);
617 if (!smbcli_request_send(req)) {
618 smbcli_request_destroy(req);
625 /****************************************************************************
626  raw echo interface (async recv)
627 ****************************************************************************/
/*
  Receives one SMBecho reply, copies the payload into a fresh buffer on
  mem_ctx, and destroys the request once all expected replies (count ==
  repeat_count) have arrived.
  NOTE(review): the struct smb_echo *p parameter line, the early-return
  body of the receive-failure branch, the count increment and re-arm
  path between the two destroy calls, and closing braces are missing
  from this extract.
*/
628 NTSTATUS smb_raw_echo_recv(struct smbcli_request *req, TALLOC_CTX *mem_ctx,
631 if (!smbcli_request_receive(req) ||
632 smbcli_request_is_error(req)) {
636 SMBCLI_CHECK_WCT(req, 1);
638 p->out.sequence_number = SVAL(req->in.vwv, VWV(0));
639 p->out.size = req->in.data_size;
	/* replace any payload from a previous reply with a copy on mem_ctx */
640 talloc_free(p->out.data);
641 p->out.data = talloc_array(mem_ctx, uint8_t, p->out.size);
642 NT_STATUS_HAVE_NO_MEMORY(p->out.data);
644 if (!smbcli_raw_pull_data(req, req->in.data, p->out.size, p->out.data)) {
645 req->status = NT_STATUS_BUFFER_TOO_SMALL;
	/* all expected replies received - finish up */
648 if (p->out.count == p->in.repeat_count) {
649 return smbcli_request_destroy(req);
655 return smbcli_request_destroy(req);
658 /****************************************************************************
659 Send a echo (sync interface)
660 *****************************************************************************/
661 NTSTATUS smb_raw_echo(struct smbcli_transport *transport, struct smb_echo *p)
663 struct smbcli_request *req = smb_raw_echo_send(transport, p);
664 return smbcli_request_simple_recv(req);