/*
   Unix SMB/CIFS implementation.
   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
23 #include "libcli/raw/libcliraw.h"
24 #include "lib/socket/socket.h"
25 #include "lib/util/dlinklist.h"
26 #include "lib/events/events.h"
27 #include "lib/stream/packet.h"
28 #include "librpc/gen_ndr/ndr_nbt.h"
/*
  an event has happened on the socket: dispatch fd readability and
  writability to the packet layer.

  NOTE(review): this extract has lost source lines (the numeric
  prefixes are the original file's own line numbers); the fde
  parameter and the closing braces are not visible here.
*/
34 static void smbcli_transport_event_handler(struct event_context *ev,
36 uint16_t flags, void *private)
38 struct smbcli_transport *transport = talloc_get_type(private,
39 struct smbcli_transport);
40 if (flags & EVENT_FD_READ) {
41 packet_recv(transport->packet); /* pull incoming bytes into the packet parser */
44 if (flags & EVENT_FD_WRITE) {
45 packet_queue_run(transport->packet); /* flush queued outgoing packets */
/*
  talloc destructor for a transport: mark it dead (failing pending
  requests) when the transport is freed.
*/
52 static int transport_destructor(struct smbcli_transport *transport)
54 smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
/*
  error callback registered with the packet layer (see
  packet_set_error_handler below): any stream error kills the
  transport with the given status.
*/
62 static void smbcli_transport_error(void *private, NTSTATUS status)
64 struct smbcli_transport *transport = talloc_get_type(private, struct smbcli_transport);
65 smbcli_transport_dead(transport, status);
68 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob);
/*
  create a transport structure based on an established socket.

  Ownership: the new transport is allocated on parent_ctx and takes
  (or references) the socket.  Returns NULL on allocation failure.

  NOTE(review): this extract has lost source lines (the numeric
  prefixes are the original file's own line numbers), so some branch
  bodies and closing braces are not visible.
*/
73 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
74 TALLOC_CTX *parent_ctx, BOOL primary)
76 struct smbcli_transport *transport;
78 transport = talloc_zero(parent_ctx, struct smbcli_transport);
79 if (!transport) return NULL;
/* NOTE(review): lines are missing here — these two assignments are
   presumably the two arms of an if/else on 'primary' (steal the
   socket when we are its primary user, otherwise only reference it);
   confirm against the full source */
82 transport->socket = talloc_steal(transport, sock);
84 transport->socket = talloc_reference(transport, sock);
/* default negotiation parameters, seeded from the loadparm system */
86 transport->negotiate.protocol = PROTOCOL_NT1;
87 transport->options.use_spnego = lp_use_spnego() && lp_nt_status_support();
88 transport->options.max_xmit = lp_max_xmit();
89 transport->options.max_mux = lp_maxmux();
90 transport->options.request_timeout = SMB_REQUEST_TIMEOUT;
92 transport->negotiate.max_xmit = transport->options.max_xmit;
94 /* setup the stream -> packet parser */
95 transport->packet = packet_init(transport);
96 if (transport->packet == NULL) {
97 talloc_free(transport);
/* wire up the packet layer: private data, socket, completion and
   error callbacks, NBT length-prefixed framing, and event context */
100 packet_set_private(transport->packet, transport);
101 packet_set_socket(transport->packet, transport->socket->sock);
102 packet_set_callback(transport->packet, smbcli_transport_finish_recv);
103 packet_set_full_request(transport->packet, packet_full_request_nbt);
104 packet_set_error_handler(transport->packet, smbcli_transport_error);
105 packet_set_event_context(transport->packet, transport->socket->event.ctx);
106 packet_set_nofree(transport->packet);
/* initialise SMB signing state */
108 smbcli_init_signing(transport);
110 ZERO_STRUCT(transport->called);
112 /* take over event handling from the socket layer - it only
113 handles events up until we are connected */
114 talloc_free(transport->socket->event.fde);
115 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
116 transport->socket->sock,
117 socket_get_fd(transport->socket->sock),
119 smbcli_transport_event_handler,
/* hand the new fd event to the packet layer and serialise sends */
122 packet_set_fde(transport->packet, transport->socket->event.fde);
123 packet_set_serialise(transport->packet);
/* clean up pending state when the transport is talloc-freed */
124 talloc_set_destructor(transport, transport_destructor);
/*
  mark the transport as dead: kill the underlying socket and fail at
  most one pending receive with the given status.
*/
132 void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
134 smbcli_sock_dead(transport->socket);
/* don't hand the generic "unsuccessful" status to callers */
136 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
137 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
140 /* kill only the first pending receive - this is so that if
141 that async function frees the connection we don't die trying
142 to use old memory. The caller has to cope with only one
144 if (transport->pending_recv) {
145 struct smbcli_request *req = transport->pending_recv;
146 req->state = SMBCLI_REQUEST_ERROR;
147 req->status = status;
148 DLIST_REMOVE(transport->pending_recv, req);
/*
  send a NBT session request (async send half) carrying the encoded
  calling and called names; returns the request, or NULL on failure.
*/
159 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
160 struct nbt_name *calling,
161 struct nbt_name *called)
164 struct smbcli_request *req;
165 DATA_BLOB calling_blob, called_blob;
166 TALLOC_CTX *tmp_ctx = talloc_new(transport);
/* remember the called name on the transport for later use */
169 status = nbt_name_dup(transport, called, &transport->called);
170 if (!NT_STATUS_IS_OK(status)) goto failed;
/* encode both names into NBT wire format */
172 status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
173 if (!NT_STATUS_IS_OK(status)) goto failed;
175 status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
176 if (!NT_STATUS_IS_OK(status)) goto failed;
178 /* allocate output buffer */
179 req = smbcli_request_setup_nonsmb(transport,
181 calling_blob.length + called_blob.length);
182 if (req == NULL) goto failed;
184 /* put in the destination name */
185 p = req->out.buffer + NBT_HDR_SIZE;
186 memcpy(p, called_blob.data, called_blob.length);
187 p += called_blob.length;
/* then the calling name */
189 memcpy(p, calling_blob.data, calling_blob.length);
190 p += calling_blob.length;
/* fix the NBT header: payload length plus type 0x81 (session request) */
192 _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
193 SCVAL(req->out.buffer,0,0x81);
195 if (!smbcli_request_send(req)) {
196 smbcli_request_destroy(req);
/* both the success and the failure path free the temporary context */
200 talloc_free(tmp_ctx);
204 talloc_free(tmp_ctx);
/*
  map a NBT session-request refusal code to a NTSTATUS.

  NOTE(review): the switch statement and its case labels are missing
  from this extract; only the return statements survive, so which
  refusal code maps to which status cannot be read off here —
  confirm against the full source.
*/
211 static NTSTATUS map_session_refused_error(uint8_t error)
216 return NT_STATUS_REMOTE_NOT_LISTENING;
218 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
220 return NT_STATUS_REMOTE_RESOURCES;
222 return NT_STATUS_UNEXPECTED_IO_ERROR;
/*
  finish a smbcli_transport_connect(): receive the session reply,
  map the first reply byte to a NTSTATUS and destroy the request.

  NOTE(review): the case labels of the switch are missing from this
  extract (presumably the NBT session reply types: positive response,
  refused, retarget — confirm against the full source).
*/
229 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
233 if (!smbcli_request_receive(req)) {
234 smbcli_request_destroy(req);
235 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* first byte of the reply is the NBT session packet type */
238 switch (CVAL(req->in.buffer,0)) {
240 status = NT_STATUS_OK;
/* refused: byte 4 carries the refusal code */
243 status = map_session_refused_error(CVAL(req->in.buffer,4));
246 DEBUG(1,("Warning: session retarget not supported\n"));
247 status = NT_STATUS_NOT_SUPPORTED;
250 status = NT_STATUS_UNEXPECTED_IO_ERROR;
254 smbcli_request_destroy(req);
/*
  send a NBT session request if one is needed (sync wrapper around
  connect_send/connect_recv).  Port 445 carries SMB directly over
  TCP and needs no session request.
*/
262 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
263 struct nbt_name *calling,
264 struct nbt_name *called)
266 struct smbcli_request *req;
/* raw SMB-over-TCP: nothing to do */
269 if (transport->socket->port == 445) {
/* send the session request and wait for the reply */
273 req = smbcli_transport_connect_send(transport,
275 status = smbcli_transport_connect_recv(req);
276 return NT_STATUS_IS_OK(status);
/****************************************************************************
 get next mid in sequence, skipping mid 0 and any mid that is still in
 use by a request waiting for its reply
****************************************************************************/
282 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
285 struct smbcli_request *req;
287 mid = transport->next_mid;
290 /* now check to see if this mid is being used by one of the
291 pending requests. This is quite efficient because the list is
292 usually very short */
294 /* the zero mid is reserved for requests that don't have a mid */
295 if (mid == 0) mid = 1;
297 for (req=transport->pending_recv; req; req=req->next) {
298 if (req->mid == mid) {
/* NOTE(review): the lines that retry with the next candidate mid on
   a collision are missing from this extract */
304 transport->next_mid = mid+1;
/*
  timed-event callback for the transport idle function: re-arm the
  timer for the next period, then invoke the user's idle function
  (last, since it may change transport state).
*/
308 static void idle_handler(struct event_context *ev,
309 struct timed_event *te, struct timeval t, void *private)
311 struct smbcli_transport *transport = talloc_get_type(private,
312 struct smbcli_transport);
313 struct timeval next = timeval_add(&t, 0, transport->idle.period);
314 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
317 idle_handler, transport);
318 transport->idle.func(transport, transport->idle.private);
/*
  setup the idle handler for a transport
  the period is in microseconds

  Any previously installed idle timer is replaced.
*/
325 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
326 void (*idle_func)(struct smbcli_transport *, void *),
330 transport->idle.func = idle_func;
331 transport->idle.private = private;
332 transport->idle.period = period;
/* drop any existing idle timer before arming a new one */
334 if (transport->socket->event.te != NULL) {
335 talloc_free(transport->socket->event.te);
338 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
340 timeval_current_ofs(0, period),
341 idle_handler, transport);
/*
  we have a full request in our receive buffer - match it to a pending
  request, hand it the buffer, parse the header/error fields, and move
  the request to DONE (or ERROR).

  NOTE(review): many source lines are missing from this extract (goto
  labels, some assignments, closing braces — the numeric prefixes are
  the original file's line numbers), so the control flow shown here
  is incomplete.
*/
348 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob)
350 struct smbcli_transport *transport = talloc_get_type(private,
351 struct smbcli_transport);
352 uint8_t *buffer, *hdr, *vwv;
354 uint16_t wct=0, mid = 0, op = 0;
355 struct smbcli_request *req = NULL;
/* the SMB header starts after the 4-byte NBT length header */
360 hdr = buffer+NBT_HDR_SIZE;
363 /* see if it could be an oplock break request */
364 if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
369 /* at this point we need to check for a readbraw reply, as
370 these can be any length */
371 if (transport->readbraw_pending) {
372 transport->readbraw_pending = 0;
374 /* it must match the first entry in the pending queue
375 as the client is not allowed to have outstanding
377 req = transport->pending_recv;
378 if (!req) goto error;
/* hand the readbraw reply buffer to the request */
380 req->in.buffer = buffer;
381 talloc_steal(req, buffer);
383 req->in.allocated = req->in.size;
387 if (len >= MIN_SMB_SIZE) {
388 /* extract the mid for matching to pending requests */
389 mid = SVAL(hdr, HDR_MID);
390 wct = CVAL(hdr, HDR_WCT);
391 op = CVAL(hdr, HDR_COM);
394 /* match the incoming request against the list of pending requests */
395 for (req=transport->pending_recv; req; req=req->next) {
396 if (req->mid == mid) break;
399 /* see if it's a ntcancel reply for the current MID */
400 req = smbcli_handle_ntcancel_reply(req, len, hdr);
/* no matching pending request: log and drop the reply */
403 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
407 /* fill in the 'in' portion of the matching request */
408 req->in.buffer = buffer;
409 talloc_steal(req, buffer);
411 req->in.allocated = req->in.size;
413 /* handle NBT session replies */
414 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
415 req->status = NT_STATUS_OK;
419 /* handle non-SMB replies */
420 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
421 req->state = SMBCLI_REQUEST_ERROR;
/* reply too short for the word count it advertises */
425 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
426 DEBUG(2,("bad reply size for mid %d\n", mid));
427 req->status = NT_STATUS_UNSUCCESSFUL;
428 req->state = SMBCLI_REQUEST_ERROR;
/* locate the data section that follows the vwv words (+2 skips the
   byte-count field) */
435 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
436 req->in.data = req->in.vwv + VWV(wct) + 2;
437 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
438 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
439 DEBUG(3,("bad data size for mid %d\n", mid));
440 /* blergh - w2k3 gives a bogus data size values in some
442 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
445 req->in.ptr = req->in.data;
446 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
/* extract the returned error: DOS class/code pair, or a 32-bit
   NTSTATUS when FLAGS2_32_BIT_ERROR_CODES is set */
448 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
449 int class = CVAL(req->in.hdr,HDR_RCLS);
450 int code = SVAL(req->in.hdr,HDR_ERR);
451 if (class == 0 && code == 0) {
452 transport->error.e.nt_status = NT_STATUS_OK;
454 transport->error.e.nt_status = NT_STATUS_DOS(class, code);
457 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
460 req->status = transport->error.e.nt_status;
461 if (NT_STATUS_IS_OK(req->status)) {
462 transport->error.etype = ETYPE_NONE;
464 transport->error.etype = ETYPE_SMB;
/* verify the SMB signature; a bad MAC fails the request */
467 if (!smbcli_request_check_sign_mac(req)) {
468 transport->error.etype = ETYPE_SOCKET;
469 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
470 req->state = SMBCLI_REQUEST_ERROR;
471 req->status = NT_STATUS_ACCESS_DENIED;
476 /* if this request has an async handler then call that to
477 notify that the reply has been received. This might destroy
478 the request so it must happen last */
479 DLIST_REMOVE(transport->pending_recv, req);
480 req->state = SMBCLI_REQUEST_DONE;
/* error path: fail the matched request, if there was one */
488 DLIST_REMOVE(transport->pending_recv, req);
489 req->state = SMBCLI_REQUEST_ERROR;
/*
  process some read/write requests that are pending
  return False if the socket is dead
*/
503 BOOL smbcli_transport_process(struct smbcli_transport *transport)
/* flush any queued sends first */
508 packet_queue_run(transport->packet);
509 if (transport->socket->sock == NULL) {
/* then pull in any bytes already waiting on the socket */
513 status = socket_pending(transport->socket->sock, &npending);
514 if (NT_STATUS_IS_OK(status) && npending > 0) {
515 packet_recv(transport->packet);
/* the receive may itself have killed the transport */
517 if (transport->socket->sock == NULL) {
/*
  handle timeouts of individual smb requests: fail a request that is
  still waiting for its reply with NT_STATUS_IO_TIMEOUT.
*/
526 static void smbcli_timeout_handler(struct event_context *ev, struct timed_event *te,
527 struct timeval t, void *private)
529 struct smbcli_request *req = talloc_get_type(private, struct smbcli_request);
/* only requests still on the pending-receive list can time out */
531 if (req->state == SMBCLI_REQUEST_RECV) {
532 DLIST_REMOVE(req->transport->pending_recv, req);
534 req->status = NT_STATUS_IO_TIMEOUT;
535 req->state = SMBCLI_REQUEST_ERROR;
/*
  talloc destructor for a request: make sure a request that is freed
  while still awaiting its reply is unlinked from the transport's
  pending-receive list, so the transport never touches freed memory.
*/
545 static int smbcli_request_destructor(struct smbcli_request *req)
547 if (req->state == SMBCLI_REQUEST_RECV) {
548 DLIST_REMOVE(req->transport->pending_recv, req);
/*
  put a request into the send queue.  On success the request moves to
  RECV state (or DONE for one-way requests); on failure its state and
  status record the error.
*/
557 void smbcli_transport_send(struct smbcli_request *req)
562 /* check if the transport is dead */
563 if (req->transport->socket->sock == NULL) {
564 req->state = SMBCLI_REQUEST_ERROR;
565 req->status = NT_STATUS_NET_WRITE_FAULT;
/* hand the raw request bytes to the packet layer */
569 blob = data_blob_const(req->out.buffer, req->out.size);
570 status = packet_send(req->transport->packet, blob);
571 if (!NT_STATUS_IS_OK(status)) {
572 req->state = SMBCLI_REQUEST_ERROR;
573 req->status = status;
/* one-way requests expect no reply: done as soon as queued */
577 if (req->one_way_request) {
578 req->state = SMBCLI_REQUEST_DONE;
579 smbcli_request_destroy(req);
/* otherwise wait for the reply on the pending-receive list */
583 req->state = SMBCLI_REQUEST_RECV;
584 DLIST_ADD(req->transport->pending_recv, req);
/* arm the per-request timeout, if configured (value is in seconds) */
587 if (req->transport->options.request_timeout) {
588 event_add_timed(req->transport->socket->event.ctx, req,
589 timeval_current_ofs(req->transport->options.request_timeout, 0),
590 smbcli_timeout_handler, req);
593 talloc_set_destructor(req, smbcli_request_destructor);
/****************************************************************************
 Send an SMBecho (async send): one vwv word carrying the repeat count,
 followed by the caller's echo payload
*****************************************************************************/
600 struct smbcli_request *smb_raw_echo_send(struct smbcli_transport *transport,
603 struct smbcli_request *req;
/* 1 vwv word plus p->in.size data bytes */
605 req = smbcli_request_setup_transport(transport, SMBecho, 1, p->in.size);
606 if (!req) return NULL;
608 SSVAL(req->out.vwv, VWV(0), p->in.repeat_count);
610 memcpy(req->out.data, p->in.data, p->in.size);
614 if (!smbcli_request_send(req)) {
615 smbcli_request_destroy(req);
/****************************************************************************
 raw echo interface (async recv): receives one echo reply per repeat;
 the request is destroyed once the final reply (or an error) arrives
****************************************************************************/
625 NTSTATUS smb_raw_echo_recv(struct smbcli_request *req, TALLOC_CTX *mem_ctx,
628 if (!smbcli_request_receive(req) ||
629 smbcli_request_is_error(req)) {
/* expect exactly one vwv word: the sequence number */
633 SMBCLI_CHECK_WCT(req, 1);
635 p->out.sequence_number = SVAL(req->in.vwv, VWV(0));
636 p->out.size = req->in.data_size;
/* replace any previous reply's data buffer (talloc_free(NULL) is a no-op) */
637 talloc_free(p->out.data);
638 p->out.data = talloc_array(mem_ctx, uint8_t, p->out.size);
639 NT_STATUS_HAVE_NO_MEMORY(p->out.data);
641 if (!smbcli_raw_pull_data(req, req->in.data, p->out.size, p->out.data)) {
642 req->status = NT_STATUS_BUFFER_TOO_SMALL;
/* was this the final reply of the repeat sequence? */
645 if (p->out.count == p->in.repeat_count) {
646 return smbcli_request_destroy(req);
652 return smbcli_request_destroy(req);
655 /****************************************************************************
656 Send a echo (sync interface)
657 *****************************************************************************/
658 NTSTATUS smb_raw_echo(struct smbcli_transport *transport, struct smb_echo *p)
660 struct smbcli_request *req = smb_raw_echo_send(transport, p);
661 return smbcli_request_simple_recv(req);