/* 
   Unix SMB/CIFS implementation.

   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
23 #include "libcli/raw/libcliraw.h"
24 #include "lib/socket/socket.h"
25 #include "lib/util/dlinklist.h"
26 #include "lib/events/events.h"
27 #include "lib/stream/packet.h"
28 #include "librpc/gen_ndr/ndr_nbt.h"
29 #include "param/param.h"
33 an event has happened on the socket
35 static void smbcli_transport_event_handler(struct event_context *ev,
37 uint16_t flags, void *private)
39 struct smbcli_transport *transport = talloc_get_type(private,
40 struct smbcli_transport);
41 if (flags & EVENT_FD_READ) {
42 packet_recv(transport->packet);
45 if (flags & EVENT_FD_WRITE) {
46 packet_queue_run(transport->packet);
53 static int transport_destructor(struct smbcli_transport *transport)
55 smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
63 static void smbcli_transport_error(void *private, NTSTATUS status)
65 struct smbcli_transport *transport = talloc_get_type(private, struct smbcli_transport);
66 smbcli_transport_dead(transport, status);
69 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob);
72 create a transport structure based on an established socket
74 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
75 TALLOC_CTX *parent_ctx,
77 struct smbcli_options *options)
79 struct smbcli_transport *transport;
81 transport = talloc_zero(parent_ctx, struct smbcli_transport);
82 if (!transport) return NULL;
85 transport->socket = talloc_steal(transport, sock);
87 transport->socket = talloc_reference(transport, sock);
89 transport->negotiate.protocol = PROTOCOL_NT1;
90 transport->options = *options;
91 transport->negotiate.max_xmit = transport->options.max_xmit;
93 /* setup the stream -> packet parser */
94 transport->packet = packet_init(transport);
95 if (transport->packet == NULL) {
96 talloc_free(transport);
99 packet_set_private(transport->packet, transport);
100 packet_set_socket(transport->packet, transport->socket->sock);
101 packet_set_callback(transport->packet, smbcli_transport_finish_recv);
102 packet_set_full_request(transport->packet, packet_full_request_nbt);
103 packet_set_error_handler(transport->packet, smbcli_transport_error);
104 packet_set_event_context(transport->packet, transport->socket->event.ctx);
105 packet_set_nofree(transport->packet);
107 smbcli_init_signing(transport);
109 ZERO_STRUCT(transport->called);
111 /* take over event handling from the socket layer - it only
112 handles events up until we are connected */
113 talloc_free(transport->socket->event.fde);
114 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
115 transport->socket->sock,
116 socket_get_fd(transport->socket->sock),
118 smbcli_transport_event_handler,
121 packet_set_fde(transport->packet, transport->socket->event.fde);
122 packet_set_serialise(transport->packet);
123 talloc_set_destructor(transport, transport_destructor);
129 mark the transport as dead
131 void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
133 smbcli_sock_dead(transport->socket);
135 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
136 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
139 /* kill only the first pending receive - this is so that if
140 that async function frees the connection we don't die trying
141 to use old memory. The caller has to cope with only one
143 if (transport->pending_recv) {
144 struct smbcli_request *req = transport->pending_recv;
145 req->state = SMBCLI_REQUEST_ERROR;
146 req->status = status;
147 DLIST_REMOVE(transport->pending_recv, req);
156 send a session request
158 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
159 struct nbt_name *calling,
160 struct nbt_name *called)
163 struct smbcli_request *req;
164 DATA_BLOB calling_blob, called_blob;
165 TALLOC_CTX *tmp_ctx = talloc_new(transport);
167 struct smb_iconv_convenience *iconv_convenience = lp_iconv_convenience(global_loadparm);
169 status = nbt_name_dup(transport, called, &transport->called);
170 if (!NT_STATUS_IS_OK(status)) goto failed;
172 status = nbt_name_to_blob(tmp_ctx, iconv_convenience, &calling_blob, calling);
173 if (!NT_STATUS_IS_OK(status)) goto failed;
175 status = nbt_name_to_blob(tmp_ctx, iconv_convenience, &called_blob, called);
176 if (!NT_STATUS_IS_OK(status)) goto failed;
178 /* allocate output buffer */
179 req = smbcli_request_setup_nonsmb(transport,
181 calling_blob.length + called_blob.length);
182 if (req == NULL) goto failed;
184 /* put in the destination name */
185 p = req->out.buffer + NBT_HDR_SIZE;
186 memcpy(p, called_blob.data, called_blob.length);
187 p += called_blob.length;
189 memcpy(p, calling_blob.data, calling_blob.length);
190 p += calling_blob.length;
192 _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
193 SCVAL(req->out.buffer,0,0x81);
195 if (!smbcli_request_send(req)) {
196 smbcli_request_destroy(req);
200 talloc_free(tmp_ctx);
204 talloc_free(tmp_ctx);
209 map a session request error to a NTSTATUS
211 static NTSTATUS map_session_refused_error(uint8_t error)
216 return NT_STATUS_REMOTE_NOT_LISTENING;
218 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
220 return NT_STATUS_REMOTE_RESOURCES;
222 return NT_STATUS_UNEXPECTED_IO_ERROR;
227 finish a smbcli_transport_connect()
229 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
233 if (!smbcli_request_receive(req)) {
234 smbcli_request_destroy(req);
235 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
238 switch (CVAL(req->in.buffer,0)) {
240 status = NT_STATUS_OK;
243 status = map_session_refused_error(CVAL(req->in.buffer,4));
246 DEBUG(1,("Warning: session retarget not supported\n"));
247 status = NT_STATUS_NOT_SUPPORTED;
250 status = NT_STATUS_UNEXPECTED_IO_ERROR;
254 smbcli_request_destroy(req);
260 send a session request (if needed)
262 bool smbcli_transport_connect(struct smbcli_transport *transport,
263 struct nbt_name *calling,
264 struct nbt_name *called)
266 struct smbcli_request *req;
269 if (transport->socket->port == 445) {
273 req = smbcli_transport_connect_send(transport,
275 status = smbcli_transport_connect_recv(req);
276 return NT_STATUS_IS_OK(status);
279 /****************************************************************************
280 get next mid in sequence
281 ****************************************************************************/
282 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
285 struct smbcli_request *req;
287 mid = transport->next_mid;
290 /* now check to see if this mid is being used by one of the
291 pending requests. This is quite efficient because the list is
292 usually very short */
294 /* the zero mid is reserved for requests that don't have a mid */
295 if (mid == 0) mid = 1;
297 for (req=transport->pending_recv; req; req=req->next) {
298 if (req->mid == mid) {
304 transport->next_mid = mid+1;
308 static void idle_handler(struct event_context *ev,
309 struct timed_event *te, struct timeval t, void *private)
311 struct smbcli_transport *transport = talloc_get_type(private,
312 struct smbcli_transport);
313 struct timeval next = timeval_add(&t, 0, transport->idle.period);
314 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
317 idle_handler, transport);
318 transport->idle.func(transport, transport->idle.private);
322 setup the idle handler for a transport
323 the period is in microseconds
325 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
326 void (*idle_func)(struct smbcli_transport *, void *),
330 transport->idle.func = idle_func;
331 transport->idle.private = private;
332 transport->idle.period = period;
334 if (transport->socket->event.te != NULL) {
335 talloc_free(transport->socket->event.te);
338 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
340 timeval_current_ofs(0, period),
341 idle_handler, transport);
345 we have a full request in our receive buffer - match it to a pending request
348 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob)
350 struct smbcli_transport *transport = talloc_get_type(private,
351 struct smbcli_transport);
352 uint8_t *buffer, *hdr, *vwv;
354 uint16_t wct=0, mid = 0, op = 0;
355 struct smbcli_request *req = NULL;
360 hdr = buffer+NBT_HDR_SIZE;
363 /* see if it could be an oplock break request */
364 if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
369 /* at this point we need to check for a readbraw reply, as
370 these can be any length */
371 if (transport->readbraw_pending) {
372 transport->readbraw_pending = 0;
374 /* it must match the first entry in the pending queue
375 as the client is not allowed to have outstanding
377 req = transport->pending_recv;
378 if (!req) goto error;
380 req->in.buffer = buffer;
381 talloc_steal(req, buffer);
383 req->in.allocated = req->in.size;
387 if (len >= MIN_SMB_SIZE) {
388 /* extract the mid for matching to pending requests */
389 mid = SVAL(hdr, HDR_MID);
390 wct = CVAL(hdr, HDR_WCT);
391 op = CVAL(hdr, HDR_COM);
394 /* match the incoming request against the list of pending requests */
395 for (req=transport->pending_recv; req; req=req->next) {
396 if (req->mid == mid) break;
399 /* see if it's a ntcancel reply for the current MID */
400 req = smbcli_handle_ntcancel_reply(req, len, hdr);
403 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
407 /* fill in the 'in' portion of the matching request */
408 req->in.buffer = buffer;
409 talloc_steal(req, buffer);
411 req->in.allocated = req->in.size;
413 /* handle NBT session replies */
414 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
415 req->status = NT_STATUS_OK;
419 /* handle non-SMB replies */
420 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
421 req->state = SMBCLI_REQUEST_ERROR;
425 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
426 DEBUG(2,("bad reply size for mid %d\n", mid));
427 req->status = NT_STATUS_UNSUCCESSFUL;
428 req->state = SMBCLI_REQUEST_ERROR;
435 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
436 req->in.data = req->in.vwv + VWV(wct) + 2;
437 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
438 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
439 DEBUG(3,("bad data size for mid %d\n", mid));
440 /* blergh - w2k3 gives a bogus data size values in some
442 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
445 req->in.ptr = req->in.data;
446 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
448 smb_setup_bufinfo(req);
450 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
451 int class = CVAL(req->in.hdr,HDR_RCLS);
452 int code = SVAL(req->in.hdr,HDR_ERR);
453 if (class == 0 && code == 0) {
454 transport->error.e.nt_status = NT_STATUS_OK;
456 transport->error.e.nt_status = NT_STATUS_DOS(class, code);
459 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
462 req->status = transport->error.e.nt_status;
463 if (NT_STATUS_IS_OK(req->status)) {
464 transport->error.etype = ETYPE_NONE;
466 transport->error.etype = ETYPE_SMB;
469 if (!smbcli_request_check_sign_mac(req)) {
470 transport->error.etype = ETYPE_SOCKET;
471 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
472 req->state = SMBCLI_REQUEST_ERROR;
473 req->status = NT_STATUS_ACCESS_DENIED;
478 /* if this request has an async handler then call that to
479 notify that the reply has been received. This might destroy
480 the request so it must happen last */
481 DLIST_REMOVE(transport->pending_recv, req);
482 req->state = SMBCLI_REQUEST_DONE;
490 DLIST_REMOVE(transport->pending_recv, req);
491 req->state = SMBCLI_REQUEST_ERROR;
502 process some read/write requests that are pending
503 return false if the socket is dead
505 bool smbcli_transport_process(struct smbcli_transport *transport)
510 packet_queue_run(transport->packet);
511 if (transport->socket->sock == NULL) {
515 status = socket_pending(transport->socket->sock, &npending);
516 if (NT_STATUS_IS_OK(status) && npending > 0) {
517 packet_recv(transport->packet);
519 if (transport->socket->sock == NULL) {
526 handle timeouts of individual smb requests
528 static void smbcli_timeout_handler(struct event_context *ev, struct timed_event *te,
529 struct timeval t, void *private)
531 struct smbcli_request *req = talloc_get_type(private, struct smbcli_request);
533 if (req->state == SMBCLI_REQUEST_RECV) {
534 DLIST_REMOVE(req->transport->pending_recv, req);
536 req->status = NT_STATUS_IO_TIMEOUT;
537 req->state = SMBCLI_REQUEST_ERROR;
547 static int smbcli_request_destructor(struct smbcli_request *req)
549 if (req->state == SMBCLI_REQUEST_RECV) {
550 DLIST_REMOVE(req->transport->pending_recv, req);
557 put a request into the send queue
559 void smbcli_transport_send(struct smbcli_request *req)
564 /* check if the transport is dead */
565 if (req->transport->socket->sock == NULL) {
566 req->state = SMBCLI_REQUEST_ERROR;
567 req->status = NT_STATUS_NET_WRITE_FAULT;
571 blob = data_blob_const(req->out.buffer, req->out.size);
572 status = packet_send(req->transport->packet, blob);
573 if (!NT_STATUS_IS_OK(status)) {
574 req->state = SMBCLI_REQUEST_ERROR;
575 req->status = status;
579 if (req->one_way_request) {
580 req->state = SMBCLI_REQUEST_DONE;
581 smbcli_request_destroy(req);
585 req->state = SMBCLI_REQUEST_RECV;
586 DLIST_ADD(req->transport->pending_recv, req);
589 if (req->transport->options.request_timeout) {
590 event_add_timed(req->transport->socket->event.ctx, req,
591 timeval_current_ofs(req->transport->options.request_timeout, 0),
592 smbcli_timeout_handler, req);
595 talloc_set_destructor(req, smbcli_request_destructor);
599 /****************************************************************************
600 Send an SMBecho (async send)
601 *****************************************************************************/
602 struct smbcli_request *smb_raw_echo_send(struct smbcli_transport *transport,
605 struct smbcli_request *req;
607 req = smbcli_request_setup_transport(transport, SMBecho, 1, p->in.size);
608 if (!req) return NULL;
610 SSVAL(req->out.vwv, VWV(0), p->in.repeat_count);
612 memcpy(req->out.data, p->in.data, p->in.size);
616 if (!smbcli_request_send(req)) {
617 smbcli_request_destroy(req);
624 /****************************************************************************
625 raw echo interface (async recv)
626 ****************************************************************************/
627 NTSTATUS smb_raw_echo_recv(struct smbcli_request *req, TALLOC_CTX *mem_ctx,
630 if (!smbcli_request_receive(req) ||
631 smbcli_request_is_error(req)) {
635 SMBCLI_CHECK_WCT(req, 1);
637 p->out.sequence_number = SVAL(req->in.vwv, VWV(0));
638 p->out.size = req->in.data_size;
639 talloc_free(p->out.data);
640 p->out.data = talloc_array(mem_ctx, uint8_t, p->out.size);
641 NT_STATUS_HAVE_NO_MEMORY(p->out.data);
643 if (!smbcli_raw_pull_data(&req->in.bufinfo, req->in.data, p->out.size, p->out.data)) {
644 req->status = NT_STATUS_BUFFER_TOO_SMALL;
647 if (p->out.count == p->in.repeat_count) {
648 return smbcli_request_destroy(req);
654 return smbcli_request_destroy(req);
657 /****************************************************************************
658 Send a echo (sync interface)
659 *****************************************************************************/
660 NTSTATUS smb_raw_echo(struct smbcli_transport *transport, struct smb_echo *p)
662 struct smbcli_request *req = smb_raw_echo_send(transport, p);
663 return smbcli_request_simple_recv(req);