/* 
   Unix SMB/CIFS implementation.

   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
24 #include "libcli/raw/libcliraw.h"
25 #include "lib/socket/socket.h"
26 #include "lib/util/dlinklist.h"
27 #include "lib/events/events.h"
28 #include "lib/stream/packet.h"
29 #include "librpc/gen_ndr/ndr_nbt.h"
33 an event has happened on the socket
35 static void smbcli_transport_event_handler(struct event_context *ev,
37 uint16_t flags, void *private)
39 struct smbcli_transport *transport = talloc_get_type(private,
40 struct smbcli_transport);
41 if (flags & EVENT_FD_READ) {
42 packet_recv(transport->packet);
45 if (flags & EVENT_FD_WRITE) {
46 packet_queue_run(transport->packet);
53 static int transport_destructor(struct smbcli_transport *transport)
55 smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
63 static void smbcli_transport_error(void *private, NTSTATUS status)
65 struct smbcli_transport *transport = talloc_get_type(private, struct smbcli_transport);
66 smbcli_transport_dead(transport, status);
69 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob);
72 create a transport structure based on an established socket
74 struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
75 TALLOC_CTX *parent_ctx, BOOL primary)
77 struct smbcli_transport *transport;
79 transport = talloc_zero(parent_ctx, struct smbcli_transport);
80 if (!transport) return NULL;
83 transport->socket = talloc_steal(transport, sock);
85 transport->socket = talloc_reference(transport, sock);
87 transport->negotiate.protocol = PROTOCOL_NT1;
88 transport->options.use_spnego = lp_use_spnego() && lp_nt_status_support();
89 transport->options.max_xmit = lp_max_xmit();
90 transport->options.max_mux = lp_maxmux();
91 transport->options.request_timeout = SMB_REQUEST_TIMEOUT;
93 transport->negotiate.max_xmit = transport->options.max_xmit;
95 /* setup the stream -> packet parser */
96 transport->packet = packet_init(transport);
97 if (transport->packet == NULL) {
98 talloc_free(transport);
101 packet_set_private(transport->packet, transport);
102 packet_set_socket(transport->packet, transport->socket->sock);
103 packet_set_callback(transport->packet, smbcli_transport_finish_recv);
104 packet_set_full_request(transport->packet, packet_full_request_nbt);
105 packet_set_error_handler(transport->packet, smbcli_transport_error);
106 packet_set_event_context(transport->packet, transport->socket->event.ctx);
107 packet_set_nofree(transport->packet);
109 smbcli_init_signing(transport);
111 ZERO_STRUCT(transport->called);
113 /* take over event handling from the socket layer - it only
114 handles events up until we are connected */
115 talloc_free(transport->socket->event.fde);
116 transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
117 transport->socket->sock,
118 socket_get_fd(transport->socket->sock),
120 smbcli_transport_event_handler,
123 packet_set_fde(transport->packet, transport->socket->event.fde);
124 packet_set_serialise(transport->packet);
125 talloc_set_destructor(transport, transport_destructor);
131 mark the transport as dead
133 void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
135 smbcli_sock_dead(transport->socket);
137 if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
138 status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
141 /* kill only the first pending receive - this is so that if
142 that async function frees the connection we don't die trying
143 to use old memory. The caller has to cope with only one
145 if (transport->pending_recv) {
146 struct smbcli_request *req = transport->pending_recv;
147 req->state = SMBCLI_REQUEST_ERROR;
148 req->status = status;
149 DLIST_REMOVE(transport->pending_recv, req);
158 send a session request
160 struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
161 struct nbt_name *calling,
162 struct nbt_name *called)
165 struct smbcli_request *req;
166 DATA_BLOB calling_blob, called_blob;
167 TALLOC_CTX *tmp_ctx = talloc_new(transport);
170 status = nbt_name_dup(transport, called, &transport->called);
171 if (!NT_STATUS_IS_OK(status)) goto failed;
173 status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
174 if (!NT_STATUS_IS_OK(status)) goto failed;
176 status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
177 if (!NT_STATUS_IS_OK(status)) goto failed;
179 /* allocate output buffer */
180 req = smbcli_request_setup_nonsmb(transport,
182 calling_blob.length + called_blob.length);
183 if (req == NULL) goto failed;
185 /* put in the destination name */
186 p = req->out.buffer + NBT_HDR_SIZE;
187 memcpy(p, called_blob.data, called_blob.length);
188 p += called_blob.length;
190 memcpy(p, calling_blob.data, calling_blob.length);
191 p += calling_blob.length;
193 _smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
194 SCVAL(req->out.buffer,0,0x81);
196 if (!smbcli_request_send(req)) {
197 smbcli_request_destroy(req);
201 talloc_free(tmp_ctx);
205 talloc_free(tmp_ctx);
210 map a session request error to a NTSTATUS
212 static NTSTATUS map_session_refused_error(uint8_t error)
217 return NT_STATUS_REMOTE_NOT_LISTENING;
219 return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
221 return NT_STATUS_REMOTE_RESOURCES;
223 return NT_STATUS_UNEXPECTED_IO_ERROR;
228 finish a smbcli_transport_connect()
230 NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
234 if (!smbcli_request_receive(req)) {
235 smbcli_request_destroy(req);
236 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
239 switch (CVAL(req->in.buffer,0)) {
241 status = NT_STATUS_OK;
244 status = map_session_refused_error(CVAL(req->in.buffer,4));
247 DEBUG(1,("Warning: session retarget not supported\n"));
248 status = NT_STATUS_NOT_SUPPORTED;
251 status = NT_STATUS_UNEXPECTED_IO_ERROR;
255 smbcli_request_destroy(req);
261 send a session request (if needed)
263 BOOL smbcli_transport_connect(struct smbcli_transport *transport,
264 struct nbt_name *calling,
265 struct nbt_name *called)
267 struct smbcli_request *req;
270 if (transport->socket->port == 445) {
274 req = smbcli_transport_connect_send(transport,
276 status = smbcli_transport_connect_recv(req);
277 return NT_STATUS_IS_OK(status);
280 /****************************************************************************
281 get next mid in sequence
282 ****************************************************************************/
283 uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
286 struct smbcli_request *req;
288 mid = transport->next_mid;
291 /* now check to see if this mid is being used by one of the
292 pending requests. This is quite efficient because the list is
293 usually very short */
295 /* the zero mid is reserved for requests that don't have a mid */
296 if (mid == 0) mid = 1;
298 for (req=transport->pending_recv; req; req=req->next) {
299 if (req->mid == mid) {
305 transport->next_mid = mid+1;
309 static void idle_handler(struct event_context *ev,
310 struct timed_event *te, struct timeval t, void *private)
312 struct smbcli_transport *transport = talloc_get_type(private,
313 struct smbcli_transport);
314 struct timeval next = timeval_add(&t, 0, transport->idle.period);
315 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
318 idle_handler, transport);
319 transport->idle.func(transport, transport->idle.private);
323 setup the idle handler for a transport
324 the period is in microseconds
326 void smbcli_transport_idle_handler(struct smbcli_transport *transport,
327 void (*idle_func)(struct smbcli_transport *, void *),
331 transport->idle.func = idle_func;
332 transport->idle.private = private;
333 transport->idle.period = period;
335 if (transport->socket->event.te != NULL) {
336 talloc_free(transport->socket->event.te);
339 transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
341 timeval_current_ofs(0, period),
342 idle_handler, transport);
/*
  we have a full request in our receive buffer - match it to a pending
  request and complete it.

  NOTE(review): this chunk is a lossy extraction - the leading numbers
  on each line are original-file line numbers, and several lines
  (closing braces, goto labels, early returns, the 'len' declaration,
  'req->in.size' assignments) are missing between the visible ones.
  The code is left byte-identical rather than reconstructed because
  the missing control flow (goto targets, returns) cannot be confirmed
  from what is visible here.
*/
346 we have a full request in our receive buffer - match it to a pending request
349 static NTSTATUS smbcli_transport_finish_recv(void *private, DATA_BLOB blob)
/* recover the transport from the packet-layer private pointer
   (registered via packet_set_private in smbcli_transport_init) */
351 struct smbcli_transport *transport = talloc_get_type(private,
352 struct smbcli_transport);
353 uint8_t *buffer, *hdr, *vwv;
355 uint16_t wct=0, mid = 0, op = 0;
356 struct smbcli_request *req = NULL;
/* hdr points at the SMB header, just past the 4-byte NBT framing */
361 hdr = buffer+NBT_HDR_SIZE;
364 /* see if it could be an oplock break request */
365 if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
370 /* at this point we need to check for a readbraw reply, as
371 these can be any length */
372 if (transport->readbraw_pending) {
373 transport->readbraw_pending = 0;
375 /* it must match the first entry in the pending queue
376 as the client is not allowed to have outstanding
378 req = transport->pending_recv;
379 if (!req) goto error;
/* hand the raw buffer straight to the matched request, transferring
   talloc ownership */
381 req->in.buffer = buffer;
382 talloc_steal(req, buffer);
384 req->in.allocated = req->in.size;
388 if (len >= MIN_SMB_SIZE) {
389 /* extract the mid for matching to pending requests */
390 mid = SVAL(hdr, HDR_MID);
391 wct = CVAL(hdr, HDR_WCT);
392 op = CVAL(hdr, HDR_COM);
395 /* match the incoming request against the list of pending requests */
396 for (req=transport->pending_recv; req; req=req->next) {
397 if (req->mid == mid) break;
400 /* see if it's a ntcancel reply for the current MID */
401 req = smbcli_handle_ntcancel_reply(req, len, hdr);
/* no pending request matched this mid - the reply is dropped */
404 DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
408 /* fill in the 'in' portion of the matching request */
409 req->in.buffer = buffer;
410 talloc_steal(req, buffer);
412 req->in.allocated = req->in.size;
414 /* handle NBT session replies */
415 if (req->in.size >= 4 && req->in.buffer[0] != 0) {
416 req->status = NT_STATUS_OK;
420 /* handle non-SMB replies */
421 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
422 req->state = SMBCLI_REQUEST_ERROR;
/* reply too short to hold the advertised word count */
426 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
427 DEBUG(2,("bad reply size for mid %d\n", mid));
428 req->status = NT_STATUS_UNSUCCESSFUL;
429 req->state = SMBCLI_REQUEST_ERROR;
/* locate the data block: it follows the VWV words and the 2-byte
   byte-count field */
436 if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
437 req->in.data = req->in.vwv + VWV(wct) + 2;
438 req->in.data_size = SVAL(req->in.vwv, VWV(wct));
439 if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
440 DEBUG(3,("bad data size for mid %d\n", mid));
441 /* blergh - w2k3 gives a bogus data size values in some
/* clamp the data size to what actually arrived on the wire */
443 req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
446 req->in.ptr = req->in.data;
447 req->flags2 = SVAL(req->in.hdr, HDR_FLG2);
/* decode the error field: DOS class/code pair or a 32-bit NT status,
   depending on FLAGS2_32_BIT_ERROR_CODES */
449 if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
450 int class = CVAL(req->in.hdr,HDR_RCLS);
451 int code = SVAL(req->in.hdr,HDR_ERR);
452 if (class == 0 && code == 0) {
453 transport->error.e.nt_status = NT_STATUS_OK;
455 transport->error.e.nt_status = NT_STATUS_DOS(class, code);
458 transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
461 req->status = transport->error.e.nt_status;
462 if (NT_STATUS_IS_OK(req->status)) {
463 transport->error.etype = ETYPE_NONE;
465 transport->error.etype = ETYPE_SMB;
/* verify the SMB signing MAC - a bad signature fails the request */
468 if (!smbcli_request_check_sign_mac(req)) {
469 transport->error.etype = ETYPE_SOCKET;
470 transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
471 req->state = SMBCLI_REQUEST_ERROR;
472 req->status = NT_STATUS_ACCESS_DENIED;
477 /* if this request has an async handler then call that to
478 notify that the reply has been received. This might destroy
479 the request so it must happen last */
480 DLIST_REMOVE(transport->pending_recv, req);
481 req->state = SMBCLI_REQUEST_DONE;
/* error path (label line not visible here): fail the matched request */
489 DLIST_REMOVE(transport->pending_recv, req);
490 req->state = SMBCLI_REQUEST_ERROR;
501 process some read/write requests that are pending
502 return False if the socket is dead
504 BOOL smbcli_transport_process(struct smbcli_transport *transport)
509 packet_queue_run(transport->packet);
510 if (transport->socket->sock == NULL) {
514 status = socket_pending(transport->socket->sock, &npending);
515 if (NT_STATUS_IS_OK(status) && npending > 0) {
516 packet_recv(transport->packet);
518 if (transport->socket->sock == NULL) {
525 handle timeouts of individual smb requests
527 static void smbcli_timeout_handler(struct event_context *ev, struct timed_event *te,
528 struct timeval t, void *private)
530 struct smbcli_request *req = talloc_get_type(private, struct smbcli_request);
532 if (req->state == SMBCLI_REQUEST_RECV) {
533 DLIST_REMOVE(req->transport->pending_recv, req);
535 req->status = NT_STATUS_IO_TIMEOUT;
536 req->state = SMBCLI_REQUEST_ERROR;
546 static int smbcli_request_destructor(struct smbcli_request *req)
548 if (req->state == SMBCLI_REQUEST_RECV) {
549 DLIST_REMOVE(req->transport->pending_recv, req);
556 put a request into the send queue
558 void smbcli_transport_send(struct smbcli_request *req)
563 /* check if the transport is dead */
564 if (req->transport->socket->sock == NULL) {
565 req->state = SMBCLI_REQUEST_ERROR;
566 req->status = NT_STATUS_NET_WRITE_FAULT;
570 blob = data_blob_const(req->out.buffer, req->out.size);
571 status = packet_send(req->transport->packet, blob);
572 if (!NT_STATUS_IS_OK(status)) {
573 req->state = SMBCLI_REQUEST_ERROR;
574 req->status = status;
578 if (req->one_way_request) {
579 req->state = SMBCLI_REQUEST_DONE;
580 smbcli_request_destroy(req);
584 req->state = SMBCLI_REQUEST_RECV;
585 DLIST_ADD(req->transport->pending_recv, req);
588 if (req->transport->options.request_timeout) {
589 event_add_timed(req->transport->socket->event.ctx, req,
590 timeval_current_ofs(req->transport->options.request_timeout, 0),
591 smbcli_timeout_handler, req);
594 talloc_set_destructor(req, smbcli_request_destructor);