2 Unix SMB/CIFS implementation.
3 SMB client transport context management functions
5 Copyright (C) Andrew Tridgell 1994-2005
6 Copyright (C) James Myers 2003 <myersjj@samba.org>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "libcli/raw/libcliraw.h"
24 #include "lib/socket/socket.h"
25 #include "lib/util/dlinklist.h"
26 #include "lib/events/events.h"
27 #include "lib/stream/packet.h"
28 #include "librpc/gen_ndr/ndr_nbt.h"
29 #include "param/param.h"
/*
 * NOTE(review): this listing is missing lines (the embedded original line
 * numbers jump), so closing braces and at least one parameter (the fde)
 * are not visible here.
 *
 * Fd-event callback for the transport socket: on a READ event pull bytes
 * into the stream->packet parser; on a WRITE event flush the queued
 * outgoing packets.
 */
33 an event has happened on the socket
35 static void smbcli_transport_event_handler(struct event_context *ev,
37 uint16_t flags, void *private)
39 struct smbcli_transport *transport = talloc_get_type(private,
40 struct smbcli_transport);
/* socket is readable: let the packet layer consume the data */
41 if (flags & EVENT_FD_READ) {
42 packet_recv(transport->packet);
/* socket is writable: push any queued output */
45 if (flags & EVENT_FD_WRITE) {
46 packet_queue_run(transport->packet);
/*
 * talloc destructor for the transport: when the structure is freed, mark
 * the connection dead with a local-disconnect status so any pending
 * requests are failed cleanly. (The return statement/closing brace were
 * lost in this listing; talloc destructors return 0 on success.)
 */
53 static int transport_destructor(struct smbcli_transport *transport)
55 smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
/*
 * Error callback registered with the packet layer (see
 * packet_set_error_handler in smbcli_transport_init): any stream error
 * kills the whole transport with the given status.
 */
63 static void smbcli_transport_error(void *private, NTSTATUS status)
65 struct smbcli_transport *transport = talloc_get_type(private, struct smbcli_transport);
66 smbcli_transport_dead(transport, status);
69 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob);
/*
 * Create a transport structure on an already-established socket.
 * Returns NULL on allocation failure. On success the transport owns
 * 'sock': both a talloc_steal and a talloc_reference assignment are
 * visible below — the if/else (presumably keyed on 'primary') that
 * selects between them was lost in this listing; TODO confirm against
 * the full source.
 */
72 create a transport structure based on an established socket
74 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
75 TALLOC_CTX *parent_ctx, BOOL primary)
77 struct smbcli_transport *transport;
79 transport = talloc_zero(parent_ctx, struct smbcli_transport);
80 if (!transport) return NULL;
/* primary path: take ownership of the socket */
83 transport->socket = talloc_steal(transport, sock);
/* non-primary path: share the socket via a reference */
85 transport->socket = talloc_reference(transport, sock);
/* default protocol/limits until a real negotiation overrides them */
87 transport->negotiate.protocol = PROTOCOL_NT1;
88 transport->options.use_spnego = lp_use_spnego(global_loadparm) &&
89 lp_nt_status_support(global_loadparm);
90 transport->options.max_xmit = lp_max_xmit(global_loadparm);
91 transport->options.max_mux = lp_maxmux(global_loadparm);
92 transport->options.request_timeout = SMB_REQUEST_TIMEOUT;
94 transport->negotiate.max_xmit = transport->options.max_xmit;
96 /* setup the stream -> packet parser */
97 transport->packet = packet_init(transport);
98 if (transport->packet == NULL) {
99 talloc_free(transport);
/* wire the parser to this transport: NBT framing, our receive/error
   callbacks, and the socket's event context */
102 packet_set_private(transport->packet, transport);
103 packet_set_socket(transport->packet, transport->socket->sock);
104 packet_set_callback(transport->packet, smbcli_transport_finish_recv);
105 packet_set_full_request(transport->packet, packet_full_request_nbt);
106 packet_set_error_handler(transport->packet, smbcli_transport_error);
107 packet_set_event_context(transport->packet, transport->socket->event.ctx);
108 packet_set_nofree(transport->packet);
110 smbcli_init_signing(transport);
112 ZERO_STRUCT(transport->called);
114 /* take over event handling from the socket layer - it only
115 handles events up until we are connected */
116 talloc_free(transport->socket->event.fde);
117 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
118 transport->socket->sock,
119 socket_get_fd(transport->socket->sock),
121 smbcli_transport_event_handler,
/* hand the new fde to the packet layer and serialise packet processing */
124 packet_set_fde(transport->packet, transport->socket->event.fde);
125 packet_set_serialise(transport->packet);
126 talloc_set_destructor(transport, transport_destructor);
/*
 * Mark the transport as dead: close the underlying socket and fail the
 * first pending receive with 'status'. A generic NT_STATUS_UNSUCCESSFUL
 * is upgraded to NT_STATUS_UNEXPECTED_NETWORK_ERROR so callers see a
 * meaningful network error.
 */
132 mark the transport as dead
134 void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
136 smbcli_sock_dead(transport->socket);
138 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
139 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
142 /* kill only the first pending receive - this is so that if
143 that async function frees the connection we don't die trying
144 to use old memory. The caller has to cope with only one
146 if (transport->pending_recv) {
147 struct smbcli_request *req = transport->pending_recv;
148 req->state = SMBCLI_REQUEST_ERROR;
149 req->status = status;
150 DLIST_REMOVE(transport->pending_recv, req);
/*
 * Send a NetBIOS session request (packet type 0x81, RFC 1002) carrying
 * the encoded called and calling names, and return the request handle
 * for smbcli_transport_connect_recv(). The 'called' name is duplicated
 * onto the transport for later use. NOTE(review): several lines of this
 * listing are missing (error paths, the 'failed' label, return
 * statements), so the full cleanup flow is not visible here.
 */
159 send a session request
161 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
162 struct nbt_name *calling,
163 struct nbt_name *called)
166 struct smbcli_request *req;
167 DATA_BLOB calling_blob, called_blob;
168 TALLOC_CTX *tmp_ctx = talloc_new(transport);
/* remember the called name on the transport for later reconnects */
171 status = nbt_name_dup(transport, called, &transport->called);
172 if (!NT_STATUS_IS_OK(status)) goto failed;
/* wire-encode both NBT names into temporary blobs */
174 status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
175 if (!NT_STATUS_IS_OK(status)) goto failed;
177 status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
178 if (!NT_STATUS_IS_OK(status)) goto failed;
180 /* allocate output buffer */
181 req = smbcli_request_setup_nonsmb(transport,
183 calling_blob.length + called_blob.length);
184 if (req == NULL) goto failed;
186 /* put in the destination name */
187 p = req->out.buffer + NBT_HDR_SIZE;
188 memcpy(p, called_blob.data, called_blob.length);
189 p += called_blob.length;
/* then the source (calling) name */
191 memcpy(p, calling_blob.data, calling_blob.length);
192 p += calling_blob.length;
/* fix up the NBT length field and stamp the session-request type 0x81 */
194 _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
195 SCVAL(req->out.buffer,0,0x81);
197 if (!smbcli_request_send(req)) {
198 smbcli_request_destroy(req);
202 talloc_free(tmp_ctx);
/* failure path cleanup (label lost in this listing) */
206 talloc_free(tmp_ctx);
211 map a session request error to a NTSTATUS
213 static NTSTATUS map_session_refused_error(uint8_t error)
218 return NT_STATUS_REMOTE_NOT_LISTENING;
220 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
222 return NT_STATUS_REMOTE_RESOURCES;
224 return NT_STATUS_UNEXPECTED_IO_ERROR;
/*
 * Complete a session request started by smbcli_transport_connect_send():
 * wait for the reply and translate the NBT packet type into an NTSTATUS.
 * NOTE(review): the case labels are missing from this listing — per
 * RFC 1002 the branches presumably correspond to positive response
 * (0x82), negative response (0x83, error byte at offset 4) and retarget
 * (0x84); confirm against the full source. Always destroys 'req'.
 */
229 finish a smbcli_transport_connect()
231 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
235 if (!smbcli_request_receive(req)) {
236 smbcli_request_destroy(req);
237 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* dispatch on the NBT session packet type in the first byte */
240 switch (CVAL(req->in.buffer,0)) {
242 status = NT_STATUS_OK;
/* negative response: map the refusal code at offset 4 */
245 status = map_session_refused_error(CVAL(req->in.buffer,4));
248 DEBUG(1,("Warning: session retarget not supported\n"));
249 status = NT_STATUS_NOT_SUPPORTED;
252 status = NT_STATUS_UNEXPECTED_IO_ERROR;
256 smbcli_request_destroy(req);
/*
 * Synchronous wrapper: send a NetBIOS session request if one is needed
 * and wait for the reply. On port 445 (raw SMB over TCP) no session
 * request is required — the early branch presumably returns success
 * (its body was lost in this listing; TODO confirm).
 */
262 send a session request (if needed)
264 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
265 struct nbt_name *calling,
266 struct nbt_name *called)
268 struct smbcli_request *req;
/* port 445 carries SMB directly over TCP - no NBT session setup */
271 if (transport->socket->port == 445) {
275 req = smbcli_transport_connect_send(transport,
277 status = smbcli_transport_connect_recv(req);
278 return NT_STATUS_IS_OK(status);
/*
 * Allocate the next multiplex id (mid), skipping 0 (reserved) and any
 * mid currently in use by a pending request. NOTE(review): lines are
 * missing from this listing — the loop that retries on a collision and
 * the final return are not visible.
 */
281 /****************************************************************************
282 get next mid in sequence
283 ****************************************************************************/
284 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
287 struct smbcli_request *req;
289 mid = transport->next_mid;
292 /* now check to see if this mid is being used by one of the
293 pending requests. This is quite efficient because the list is
294 usually very short */
296 /* the zero mid is reserved for requests that don't have a mid */
297 if (mid == 0) mid = 1;
299 for (req=transport->pending_recv; req; req=req->next) {
300 if (req->mid == mid) {
/* remember where to start next time */
306 transport->next_mid = mid+1;
/*
 * Timed-event callback for the transport idle function: re-arms the
 * timer for the next period and then invokes the user-supplied idle
 * callback. The callback runs last so it may safely tear the transport
 * down.
 */
310 static void idle_handler(struct event_context *ev,
311 struct timed_event *te, struct timeval t, void *private)
313 struct smbcli_transport *transport = talloc_get_type(private,
314 struct smbcli_transport);
/* schedule the next tick one idle.period (microseconds) from now */
315 struct timeval next = timeval_add(&t, 0, transport->idle.period);
316 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
319 idle_handler, transport);
320 transport->idle.func(transport, transport->idle.private);
/*
 * Install (or replace) the periodic idle callback for this transport.
 * 'period' is in microseconds. Any previously scheduled idle timer is
 * freed before the new one is added, so at most one timer is active.
 * NOTE(review): the 'period'/'private' parameter declarations were lost
 * in this listing.
 */
324 setup the idle handler for a transport
325 the period is in microseconds
327 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
328 void (*idle_func)(struct smbcli_transport *, void *),
332 transport->idle.func = idle_func;
333 transport->idle.private = private;
334 transport->idle.period = period;
/* cancel any existing idle timer before arming a new one */
336 if (transport->socket->event.te != NULL) {
337 talloc_free(transport->socket->event.te);
340 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
342 timeval_current_ofs(0, period),
343 idle_handler, transport);
/*
 * Packet-layer completion callback: a full NBT-framed packet has been
 * received in 'blob'. Matches it to a pending request by mid, handles
 * the special cases (oplock break, readbraw reply, NBT session reply,
 * ntcancel reply, signing failure) and moves the matched request to
 * DONE or ERROR. NOTE(review): many lines of this listing are missing
 * (variable initialisation from 'blob', several closing braces, the
 * async-callback invocation and the 'error' label), so the exact control
 * flow must be checked against the full source.
 */
347 we have a full request in our receive buffer - match it to a pending request
350 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob)
352 struct smbcli_transport *transport = talloc_get_type(private,
353 struct smbcli_transport);
354 uint8_t *buffer, *hdr, *vwv;
356 uint16_t wct=0, mid = 0, op = 0;
357 struct smbcli_request *req = NULL;
/* hdr points at the SMB header, just past the 4-byte NBT framing */
362 hdr = buffer+NBT_HDR_SIZE;
365 /* see if it could be an oplock break request */
366 if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
371 /* at this point we need to check for a readbraw reply, as
372 these can be any length */
373 if (transport->readbraw_pending) {
374 transport->readbraw_pending = 0;
376 /* it must match the first entry in the pending queue
377 as the client is not allowed to have outstanding
379 req = transport->pending_recv;
380 if (!req) goto error;
/* hand the raw buffer straight to the readbraw request */
382 req->in.buffer = buffer;
383 talloc_steal(req, buffer);
385 req->in.allocated = req->in.size;
389 if (len >= MIN_SMB_SIZE) {
390 /* extract the mid for matching to pending requests */
391 mid = SVAL(hdr, HDR_MID);
392 wct = CVAL(hdr, HDR_WCT);
393 op = CVAL(hdr, HDR_COM);
396 /* match the incoming request against the list of pending requests */
397 for (req=transport->pending_recv; req; req=req->next) {
398 if (req->mid == mid) break;
401 /* see if it's a ntcancel reply for the current MID */
402 req = smbcli_handle_ntcancel_reply(req, len, hdr);
/* no pending request owns this mid: drop the reply */
405 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
409 /* fill in the 'in' portion of the matching request */
410 req->in.buffer = buffer;
411 talloc_steal(req, buffer);
413 req->in.allocated = req->in.size;
415 /* handle NBT session replies */
416 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
417 req->status = NT_STATUS_OK;
421 /* handle non-SMB replies */
422 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
423 req->state = SMBCLI_REQUEST_ERROR;
/* reply too short to hold the advertised word count */
427 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
428 DEBUG(2,("bad reply size for mid %d\n", mid));
429 req->status = NT_STATUS_UNSUCCESSFUL;
430 req->state = SMBCLI_REQUEST_ERROR;
/* locate the data section after the vwv words */
437 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
438 req->in.data = req->in.vwv + VWV(wct) + 2;
439 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
440 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
441 DEBUG(3,("bad data size for mid %d\n", mid));
442 /* blergh - w2k3 gives a bogus data size values in some
444 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
447 req->in.ptr = req->in.data;
448 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
/* decode the error: DOS class/code pair or 32-bit NT status,
   depending on FLAGS2_32_BIT_ERROR_CODES */
450 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
451 int class = CVAL(req->in.hdr,HDR_RCLS);
452 int code = SVAL(req->in.hdr,HDR_ERR);
453 if (class == 0 && code == 0) {
454 transport->error.e.nt_status = NT_STATUS_OK;
456 transport->error.e.nt_status = NT_STATUS_DOS(class, code);
459 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
462 req->status = transport->error.e.nt_status;
463 if (NT_STATUS_IS_OK(req->status)) {
464 transport->error.etype = ETYPE_NONE;
466 transport->error.etype = ETYPE_SMB;
/* verify the SMB signature if signing is active */
469 if (!smbcli_request_check_sign_mac(req)) {
470 transport->error.etype = ETYPE_SOCKET;
471 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
472 req->state = SMBCLI_REQUEST_ERROR;
473 req->status = NT_STATUS_ACCESS_DENIED;
478 /* if this request has an async handler then call that to
479 notify that the reply has been received. This might destroy
480 the request so it must happen last */
481 DLIST_REMOVE(transport->pending_recv, req);
482 req->state = SMBCLI_REQUEST_DONE;
/* error path (label lost in this listing): fail the matched request */
490 DLIST_REMOVE(transport->pending_recv, req);
491 req->state = SMBCLI_REQUEST_ERROR;
/*
 * Pump the transport once: flush queued writes, then drain any bytes
 * already pending on the socket. Returns False if the socket has died
 * (the socket pointer is NULLed on death); the return statements were
 * lost in this listing.
 */
502 process some read/write requests that are pending
503 return False if the socket is dead
505 BOOL smbcli_transport_process(struct smbcli_transport *transport)
/* flush any queued outgoing packets first */
510 packet_queue_run(transport->packet);
511 if (transport->socket->sock == NULL) {
/* read whatever is already buffered on the socket */
515 status = socket_pending(transport->socket->sock, &npending);
516 if (NT_STATUS_IS_OK(status) && npending > 0) {
517 packet_recv(transport->packet);
/* the recv may have killed the socket - check again */
519 if (transport->socket->sock == NULL) {
/*
 * Timed-event callback for a per-request timeout: if the request is
 * still waiting for its reply, remove it from the pending list and fail
 * it with NT_STATUS_IO_TIMEOUT.
 */
526 handle timeouts of individual smb requests
528 static void smbcli_timeout_handler(struct event_context *ev, struct timed_event *te,
529 struct timeval t, void *private)
531 struct smbcli_request *req = talloc_get_type(private, struct smbcli_request);
533 if (req->state == SMBCLI_REQUEST_RECV) {
534 DLIST_REMOVE(req->transport->pending_recv, req);
536 req->status = NT_STATUS_IO_TIMEOUT;
537 req->state = SMBCLI_REQUEST_ERROR;
/*
 * talloc destructor for a request: if it is freed while still waiting
 * for a reply, unlink it from the transport's pending list so the
 * receive path never touches freed memory. (Return statement lost in
 * this listing; talloc destructors return 0 on success.)
 */
547 static int smbcli_request_destructor(struct smbcli_request *req)
549 if (req->state == SMBCLI_REQUEST_RECV) {
550 DLIST_REMOVE(req->transport->pending_recv, req);
/*
 * Queue a request for sending. On a dead transport the request is
 * failed immediately with NT_STATUS_NET_WRITE_FAULT. One-way requests
 * are completed and destroyed as soon as they are queued; two-way
 * requests go onto the pending-receive list, optionally with a timeout
 * timer, and get a destructor so freeing them unlinks them safely.
 */
557 put a request into the send queue
559 void smbcli_transport_send(struct smbcli_request *req)
564 /* check if the transport is dead */
565 if (req->transport->socket->sock == NULL) {
566 req->state = SMBCLI_REQUEST_ERROR;
567 req->status = NT_STATUS_NET_WRITE_FAULT;
/* hand the raw bytes to the packet layer */
571 blob = data_blob_const(req->out.buffer, req->out.size);
572 status = packet_send(req->transport->packet, blob);
573 if (!NT_STATUS_IS_OK(status)) {
574 req->state = SMBCLI_REQUEST_ERROR;
575 req->status = status;
/* one-way requests have no reply - finish them now */
579 if (req->one_way_request) {
580 req->state = SMBCLI_REQUEST_DONE;
581 smbcli_request_destroy(req);
585 req->state = SMBCLI_REQUEST_RECV;
586 DLIST_ADD(req->transport->pending_recv, req);
/* arm a timeout (seconds) if the transport options request one */
589 if (req->transport->options.request_timeout) {
590 event_add_timed(req->transport->socket->event.ctx, req,
591 timeval_current_ofs(req->transport->options.request_timeout, 0),
592 smbcli_timeout_handler, req);
595 talloc_set_destructor(req, smbcli_request_destructor);
/*
 * Async SMBecho send: build an echo request with one vwv word (the
 * repeat count) and p->in.size bytes of payload, then queue it.
 * Returns NULL on setup or send failure. NOTE(review): the 'p'
 * parameter declaration and the final return were lost in this listing.
 */
599 /****************************************************************************
600 Send an SMBecho (async send)
601 *****************************************************************************/
602 struct smbcli_request *smb_raw_echo_send(struct smbcli_transport *transport,
605 struct smbcli_request *req;
607 req = smbcli_request_setup_transport(transport, SMBecho, 1, p->in.size);
608 if (!req) return NULL;
/* vwv[0] = number of times the server should echo the data back */
610 SSVAL(req->out.vwv, VWV(0), p->in.repeat_count);
612 memcpy(req->out.data, p->in.data, p->in.size);
616 if (!smbcli_request_send(req)) {
617 smbcli_request_destroy(req);
/*
 * Async SMBecho receive: pull one echo reply, copy its payload into a
 * freshly allocated buffer on mem_ctx, and destroy the request once
 * p->in.repeat_count replies have arrived. NOTE(review): lines are
 * missing from this listing (the 'p' parameter, early returns, and the
 * branch that keeps the request alive between repeats) — confirm the
 * repeat handling against the full source.
 */
624 /****************************************************************************
625 raw echo interface (async recv)
626 ****************************************************************************/
627 NTSTATUS smb_raw_echo_recv(struct smbcli_request *req, TALLOC_CTX *mem_ctx,
630 if (!smbcli_request_receive(req) ||
631 smbcli_request_is_error(req)) {
635 SMBCLI_CHECK_WCT(req, 1);
/* vwv[0] of the reply carries the echo sequence number */
637 p->out.sequence_number = SVAL(req->in.vwv, VWV(0));
638 p->out.size = req->in.data_size;
/* replace any previous payload with a copy of this reply's data */
639 talloc_free(p->out.data);
640 p->out.data = talloc_array(mem_ctx, uint8_t, p->out.size);
641 NT_STATUS_HAVE_NO_MEMORY(p->out.data);
643 if (!smbcli_raw_pull_data(req, req->in.data, p->out.size, p->out.data)) {
644 req->status = NT_STATUS_BUFFER_TOO_SMALL;
/* all repeats received: finish and free the request */
647 if (p->out.count == p->in.repeat_count) {
648 return smbcli_request_destroy(req);
654 return smbcli_request_destroy(req);
/*
 * Synchronous SMBecho: send the echo and wait for the (final) reply.
 * smbcli_request_simple_recv handles a NULL req from a failed send.
 */
657 /****************************************************************************
658 Send a echo (sync interface)
659 *****************************************************************************/
660 NTSTATUS smb_raw_echo(struct smbcli_transport *transport, struct smb_echo *p)
662 struct smbcli_request *req = smb_raw_echo_send(transport, p);
663 return smbcli_request_simple_recv(req);