2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "lib/util/dlinklist.h"
28 #include "lib/util/iov_buf.h"
29 #include "../libcli/smb/smb_common.h"
30 #include "../libcli/smb/smb_seal.h"
31 #include "../libcli/smb/smb_signing.h"
32 #include "../libcli/smb/read_smb.h"
33 #include "smbXcli_base.h"
34 #include "librpc/ndr/libndr.h"
35 #include "libcli/smb/smb2_negotiate_context.h"
36 #include "lib/crypto/sha512.h"
37 #include "lib/crypto/aes.h"
38 #include "lib/crypto/aes_ccm_128.h"
39 #include "lib/crypto/aes_gcm_128.h"
43 struct smbXcli_session;
48 struct sockaddr_storage local_ss;
49 struct sockaddr_storage remote_ss;
50 const char *remote_name;
52 struct tevent_queue *outgoing;
53 struct tevent_req **pending;
54 struct tevent_req *read_smb_req;
55 struct tevent_req *suicide_req;
57 enum protocol_types min_protocol;
58 enum protocol_types max_protocol;
59 enum protocol_types protocol;
62 bool mandatory_signing;
65 * The incoming dispatch function should return:
66 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
67 * - NT_STATUS_OK, if no more processing is desired, e.g.
68 * the dispatch function called
70 * - All other return values disconnect the connection.
72 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
78 uint32_t capabilities;
83 uint32_t capabilities;
86 uint16_t security_mode;
95 const char *workgroup;
101 uint32_t capabilities;
106 struct smb_signing_state *signing;
107 struct smb_trans_enc_state *trans_enc;
109 struct tevent_req *read_braw_req;
114 uint32_t capabilities;
115 uint16_t security_mode;
120 uint32_t capabilities;
121 uint16_t security_mode;
123 uint32_t max_trans_size;
124 uint32_t max_read_size;
125 uint32_t max_write_size;
133 uint16_t cur_credits;
134 uint16_t max_credits;
136 uint32_t cc_chunk_len;
137 uint32_t cc_max_chunks;
141 bool force_channel_sequence;
143 uint8_t preauth_sha512[64];
146 struct smbXcli_session *sessions;
149 struct smb2cli_session {
151 uint16_t session_flags;
152 DATA_BLOB application_key;
153 DATA_BLOB signing_key;
156 DATA_BLOB encryption_key;
157 DATA_BLOB decryption_key;
158 uint64_t nonce_high_random;
159 uint64_t nonce_high_max;
162 uint16_t channel_sequence;
166 struct smbXcli_session {
167 struct smbXcli_session *prev, *next;
168 struct smbXcli_conn *conn;
173 DATA_BLOB application_key;
177 struct smb2cli_session *smb2;
180 DATA_BLOB signing_key;
181 uint8_t preauth_sha512[64];
185 * this should be a short term hack
186 * until the upper layers have implemented
189 bool disconnect_expired;
192 struct smbXcli_tcon {
194 uint32_t fs_attributes;
198 uint16_t optional_support;
199 uint32_t maximal_access;
200 uint32_t guest_maximal_access;
209 uint32_t capabilities;
210 uint32_t maximal_access;
216 struct smbXcli_req_state {
217 struct tevent_context *ev;
218 struct smbXcli_conn *conn;
219 struct smbXcli_session *session; /* maybe NULL */
220 struct smbXcli_tcon *tcon; /* maybe NULL */
222 uint8_t length_hdr[4];
228 struct tevent_req *write_req;
231 /* Space for the header including the wct */
232 uint8_t hdr[HDR_VWV];
235 * For normal requests, smb1cli_req_send chooses a mid.
236 * SecondaryV trans requests need to use the mid of the primary
237 * request, so we need a place to store it.
238 * Assume it is set if != 0.
243 uint8_t bytecount_buf[2];
245 #define MAX_SMB_IOV 10
246 /* length_hdr, hdr, words, byte_count, buffers */
247 struct iovec iov[1 + 3 + MAX_SMB_IOV];
252 struct tevent_req **chained_requests;
255 NTSTATUS recv_status;
256 /* always an array of 3 talloc elements */
257 struct iovec *recv_iov;
261 const uint8_t *fixed;
266 uint8_t transform[SMB2_TF_HDR_SIZE];
267 uint8_t hdr[SMB2_HDR_BODY];
268 uint8_t pad[7]; /* padding space for compounding */
271 * always an array of 3 talloc elements
272 * (without a SMB2_TRANSFORM header!)
276 struct iovec *recv_iov;
279 * the expected max for the response dyn_len
281 uint32_t max_dyn_len;
283 uint16_t credit_charge;
287 uint64_t encryption_session_id;
289 bool signing_skipped;
292 uint16_t cancel_flags;
298 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
301 * NT_STATUS_OK, means we do not notify the callers
303 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
305 while (conn->sessions) {
306 conn->sessions->conn = NULL;
307 DLIST_REMOVE(conn->sessions, conn->sessions);
310 if (conn->smb1.trans_enc) {
311 TALLOC_FREE(conn->smb1.trans_enc);
317 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
319 const char *remote_name,
320 enum smb_signing_setting signing_state,
321 uint32_t smb1_capabilities,
322 struct GUID *client_guid,
323 uint32_t smb2_capabilities)
325 struct smbXcli_conn *conn = NULL;
327 struct sockaddr *sa = NULL;
331 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
338 conn->remote_name = talloc_strdup(conn, remote_name);
339 if (conn->remote_name == NULL) {
343 ss = (void *)&conn->local_ss;
344 sa = (struct sockaddr *)ss;
345 sa_length = sizeof(conn->local_ss);
346 ret = getsockname(fd, sa, &sa_length);
350 ss = (void *)&conn->remote_ss;
351 sa = (struct sockaddr *)ss;
352 sa_length = sizeof(conn->remote_ss);
353 ret = getpeername(fd, sa, &sa_length);
358 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
359 if (conn->outgoing == NULL) {
362 conn->pending = NULL;
364 conn->min_protocol = PROTOCOL_NONE;
365 conn->max_protocol = PROTOCOL_NONE;
366 conn->protocol = PROTOCOL_NONE;
368 switch (signing_state) {
369 case SMB_SIGNING_OFF:
371 conn->allow_signing = false;
372 conn->desire_signing = false;
373 conn->mandatory_signing = false;
375 case SMB_SIGNING_DEFAULT:
376 case SMB_SIGNING_IF_REQUIRED:
377 /* if the server requires it */
378 conn->allow_signing = true;
379 conn->desire_signing = false;
380 conn->mandatory_signing = false;
382 case SMB_SIGNING_DESIRED:
383 /* if the server desires it */
384 conn->allow_signing = true;
385 conn->desire_signing = true;
386 conn->mandatory_signing = false;
388 case SMB_SIGNING_IPC_DEFAULT:
389 case SMB_SIGNING_REQUIRED:
391 conn->allow_signing = true;
392 conn->desire_signing = true;
393 conn->mandatory_signing = true;
397 conn->smb1.client.capabilities = smb1_capabilities;
398 conn->smb1.client.max_xmit = UINT16_MAX;
400 conn->smb1.capabilities = conn->smb1.client.capabilities;
401 conn->smb1.max_xmit = 1024;
405 /* initialise signing */
406 conn->smb1.signing = smb_signing_init(conn,
408 conn->desire_signing,
409 conn->mandatory_signing);
410 if (!conn->smb1.signing) {
414 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
415 if (conn->mandatory_signing) {
416 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
419 conn->smb2.client.guid = *client_guid;
421 conn->smb2.client.capabilities = smb2_capabilities;
423 conn->smb2.cur_credits = 1;
424 conn->smb2.max_credits = 0;
425 conn->smb2.io_priority = 1;
428 * Samba and Windows servers accept a maximum of 16 MiB with a maximum
429 * chunk length of 1 MiB.
431 conn->smb2.cc_chunk_len = 1024 * 1024;
432 conn->smb2.cc_max_chunks = 16;
434 talloc_set_destructor(conn, smbXcli_conn_destructor);
442 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
448 if (conn->sock_fd == -1) {
455 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
457 return conn->protocol;
460 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
462 if (conn->protocol >= PROTOCOL_SMB2_02) {
466 if (conn->smb1.capabilities & CAP_UNICODE) {
473 bool smbXcli_conn_signing_mandatory(struct smbXcli_conn *conn)
475 return conn->mandatory_signing;
479 * [MS-SMB] 2.2.2.3.5 - SMB1 support for passing through
480 * query/set commands to the file system
482 bool smbXcli_conn_support_passthrough(struct smbXcli_conn *conn)
484 if (conn->protocol >= PROTOCOL_SMB2_02) {
488 if (conn->smb1.capabilities & CAP_W2K_SMBS) {
495 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
497 set_socket_options(conn->sock_fd, options);
500 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
502 return &conn->local_ss;
505 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
507 return &conn->remote_ss;
510 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
512 return conn->remote_name;
515 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
517 if (conn->protocol >= PROTOCOL_SMB2_02) {
524 return conn->smb1.server.max_mux;
527 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
529 if (conn->protocol >= PROTOCOL_SMB2_02) {
530 return conn->smb2.server.system_time;
533 return conn->smb1.server.system_time;
536 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
538 if (conn->protocol >= PROTOCOL_SMB2_02) {
539 return &conn->smb2.server.gss_blob;
542 return &conn->smb1.server.gss_blob;
545 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
547 if (conn->protocol >= PROTOCOL_SMB2_02) {
548 return &conn->smb2.server.guid;
551 return &conn->smb1.server.guid;
554 bool smbXcli_conn_get_force_channel_sequence(struct smbXcli_conn *conn)
556 return conn->smb2.force_channel_sequence;
559 void smbXcli_conn_set_force_channel_sequence(struct smbXcli_conn *conn,
562 conn->smb2.force_channel_sequence = v;
565 struct smbXcli_conn_samba_suicide_state {
566 struct smbXcli_conn *conn;
569 struct tevent_req *write_req;
572 static void smbXcli_conn_samba_suicide_cleanup(struct tevent_req *req,
573 enum tevent_req_state req_state);
574 static void smbXcli_conn_samba_suicide_done(struct tevent_req *subreq);
576 struct tevent_req *smbXcli_conn_samba_suicide_send(TALLOC_CTX *mem_ctx,
577 struct tevent_context *ev,
578 struct smbXcli_conn *conn,
581 struct tevent_req *req, *subreq;
582 struct smbXcli_conn_samba_suicide_state *state;
584 req = tevent_req_create(mem_ctx, &state,
585 struct smbXcli_conn_samba_suicide_state);
590 SIVAL(state->buf, 4, 0x74697865);
591 SCVAL(state->buf, 8, exitcode);
592 _smb_setlen_nbt(state->buf, sizeof(state->buf)-4);
594 if (conn->suicide_req != NULL) {
595 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
596 return tevent_req_post(req, ev);
599 state->iov.iov_base = state->buf;
600 state->iov.iov_len = sizeof(state->buf);
602 subreq = writev_send(state, ev, conn->outgoing, conn->sock_fd,
603 false, &state->iov, 1);
604 if (tevent_req_nomem(subreq, req)) {
605 return tevent_req_post(req, ev);
607 tevent_req_set_callback(subreq, smbXcli_conn_samba_suicide_done, req);
608 state->write_req = subreq;
610 tevent_req_set_cleanup_fn(req, smbXcli_conn_samba_suicide_cleanup);
613 * We need to use tevent_req_defer_callback()
614 * in order to allow smbXcli_conn_disconnect()
615 * to do a safe cleanup.
617 tevent_req_defer_callback(req, ev);
618 conn->suicide_req = req;
623 static void smbXcli_conn_samba_suicide_cleanup(struct tevent_req *req,
624 enum tevent_req_state req_state)
626 struct smbXcli_conn_samba_suicide_state *state = tevent_req_data(
627 req, struct smbXcli_conn_samba_suicide_state);
629 TALLOC_FREE(state->write_req);
631 if (state->conn == NULL) {
635 if (state->conn->suicide_req == req) {
636 state->conn->suicide_req = NULL;
641 static void smbXcli_conn_samba_suicide_done(struct tevent_req *subreq)
643 struct tevent_req *req = tevent_req_callback_data(
644 subreq, struct tevent_req);
645 struct smbXcli_conn_samba_suicide_state *state = tevent_req_data(
646 req, struct smbXcli_conn_samba_suicide_state);
650 state->write_req = NULL;
652 nwritten = writev_recv(subreq, &err);
654 if (nwritten == -1) {
655 /* here, we need to notify all pending requests */
656 NTSTATUS status = map_nt_error_from_unix_common(err);
657 smbXcli_conn_disconnect(state->conn, status);
660 tevent_req_done(req);
663 NTSTATUS smbXcli_conn_samba_suicide_recv(struct tevent_req *req)
665 return tevent_req_simple_recv_ntstatus(req);
668 NTSTATUS smbXcli_conn_samba_suicide(struct smbXcli_conn *conn,
671 TALLOC_CTX *frame = talloc_stackframe();
672 struct tevent_context *ev;
673 struct tevent_req *req;
674 NTSTATUS status = NT_STATUS_NO_MEMORY;
677 if (smbXcli_conn_has_async_calls(conn)) {
679 * Can't use sync call while an async call is in flight
681 status = NT_STATUS_INVALID_PARAMETER_MIX;
684 ev = samba_tevent_context_init(frame);
688 req = smbXcli_conn_samba_suicide_send(frame, ev, conn, exitcode);
692 ok = tevent_req_poll_ntstatus(req, ev, &status);
696 status = smbXcli_conn_samba_suicide_recv(req);
702 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
704 return conn->smb1.capabilities;
707 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
709 return conn->smb1.max_xmit;
712 bool smb1cli_conn_req_possible(struct smbXcli_conn *conn)
714 size_t pending = talloc_array_length(conn->pending);
715 uint16_t possible = conn->smb1.server.max_mux;
717 if (pending >= possible) {
724 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
726 return conn->smb1.server.session_key;
729 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
731 return conn->smb1.server.challenge;
734 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
736 return conn->smb1.server.security_mode;
739 bool smb1cli_conn_server_readbraw(struct smbXcli_conn *conn)
741 return conn->smb1.server.readbraw;
744 bool smb1cli_conn_server_writebraw(struct smbXcli_conn *conn)
746 return conn->smb1.server.writebraw;
749 bool smb1cli_conn_server_lockread(struct smbXcli_conn *conn)
751 return conn->smb1.server.lockread;
754 bool smb1cli_conn_server_writeunlock(struct smbXcli_conn *conn)
756 return conn->smb1.server.writeunlock;
759 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
761 return conn->smb1.server.time_zone;
764 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
765 const DATA_BLOB user_session_key,
766 const DATA_BLOB response)
768 return smb_signing_activate(conn->smb1.signing,
773 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
774 const uint8_t *buf, uint32_t seqnum)
776 const uint8_t *hdr = buf + NBT_HDR_SIZE;
777 size_t len = smb_len_nbt(buf);
779 return smb_signing_check_pdu(conn->smb1.signing, hdr, len, seqnum);
782 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
784 return smb_signing_is_active(conn->smb1.signing);
787 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
788 struct smb_trans_enc_state *es)
790 /* Replace the old state, if any. */
791 if (conn->smb1.trans_enc) {
792 TALLOC_FREE(conn->smb1.trans_enc);
794 conn->smb1.trans_enc = es;
797 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
799 return common_encryption_on(conn->smb1.trans_enc);
803 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
805 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
806 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
808 if (NT_STATUS_IS_OK(status)) {
812 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
816 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
820 * Is the SMB command able to hold an AND_X successor
821 * @param[in] cmd The SMB command in question
822 * @retval Can we add a chained request after "cmd"?
824 bool smb1cli_is_andx_req(uint8_t cmd)
844 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
846 size_t num_pending = talloc_array_length(conn->pending);
849 if (conn->protocol == PROTOCOL_NONE) {
851 * This is what windows sends on the SMB1 Negprot request
852 * and some vendors reuse the SMB1 MID as SMB2 sequence number.
860 result = conn->smb1.mid++;
861 if ((result == 0) || (result == 0xffff)) {
865 for (i=0; i<num_pending; i++) {
866 if (result == smb1cli_req_mid(conn->pending[i])) {
871 if (i == num_pending) {
877 static NTSTATUS smbXcli_req_cancel_write_req(struct tevent_req *req)
879 struct smbXcli_req_state *state =
881 struct smbXcli_req_state);
882 struct smbXcli_conn *conn = state->conn;
883 size_t num_pending = talloc_array_length(conn->pending);
888 if (state->write_req == NULL) {
893 * Check if it's possible to cancel the request.
894 * If the result is true it's not too late.
895 * See writev_cancel().
897 ok = tevent_req_cancel(state->write_req);
899 TALLOC_FREE(state->write_req);
901 if (conn->protocol >= PROTOCOL_SMB2_02) {
903 * SMB2 has a sane signing state.
908 if (num_pending > 1) {
910 * We have more pending requests following us. This
911 * means the signing state will be broken for them.
913 * As a solution we could add the requests directly to
914 * our outgoing queue and do the signing in the trigger
915 * function and then use writev_send() without passing a
916 * queue. That way we'll only sign packets we're most
917 * likely send to the wire.
919 return NT_STATUS_REQUEST_OUT_OF_SEQUENCE;
923 * If we're the only request that's
924 * pending, we're able to recover the signing
927 smb_signing_cancel_reply(conn->smb1.signing,
928 state->smb1.one_way_seqnum);
932 ret = writev_recv(state->write_req, &err);
933 TALLOC_FREE(state->write_req);
935 return map_nt_error_from_unix_common(err);
941 void smbXcli_req_unset_pending(struct tevent_req *req)
943 struct smbXcli_req_state *state =
945 struct smbXcli_req_state);
946 struct smbXcli_conn *conn = state->conn;
947 size_t num_pending = talloc_array_length(conn->pending);
949 NTSTATUS cancel_status;
951 cancel_status = smbXcli_req_cancel_write_req(req);
953 if (state->smb1.mid != 0) {
955 * This is a [nt]trans[2] request which waits
956 * for more than one reply.
958 if (!NT_STATUS_IS_OK(cancel_status)) {
960 * If the write_req cancel didn't work
961 * we can't use the connection anymore.
963 smbXcli_conn_disconnect(conn, cancel_status);
969 tevent_req_set_cleanup_fn(req, NULL);
971 if (num_pending == 1) {
973 * The pending read_smb tevent_req is a child of
974 * conn->pending. So if nothing is pending anymore, we need to
975 * delete the socket read fde.
977 /* TODO: smbXcli_conn_cancel_read_req */
978 TALLOC_FREE(conn->pending);
979 conn->read_smb_req = NULL;
981 if (!NT_STATUS_IS_OK(cancel_status)) {
983 * If the write_req cancel didn't work
984 * we can't use the connection anymore.
986 smbXcli_conn_disconnect(conn, cancel_status);
992 for (i=0; i<num_pending; i++) {
993 if (req == conn->pending[i]) {
997 if (i == num_pending) {
999 * Something's seriously broken. Just returning here is the
1000 * right thing nevertheless, the point of this routine is to
1001 * remove ourselves from conn->pending.
1004 if (!NT_STATUS_IS_OK(cancel_status)) {
1006 * If the write_req cancel didn't work
1007 * we can't use the connection anymore.
1009 smbXcli_conn_disconnect(conn, cancel_status);
1016 * Remove ourselves from the conn->pending array
1018 for (; i < (num_pending - 1); i++) {
1019 conn->pending[i] = conn->pending[i+1];
1023 * No NULL check here, we're shrinking by sizeof(void *), and
1024 * talloc_realloc just adjusts the size for this.
1026 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
1029 if (!NT_STATUS_IS_OK(cancel_status)) {
1031 * If the write_req cancel didn't work
1032 * we can't use the connection anymore.
1034 smbXcli_conn_disconnect(conn, cancel_status);
1040 static void smbXcli_req_cleanup(struct tevent_req *req,
1041 enum tevent_req_state req_state)
1043 struct smbXcli_req_state *state =
1044 tevent_req_data(req,
1045 struct smbXcli_req_state);
1046 struct smbXcli_conn *conn = state->conn;
1047 NTSTATUS cancel_status;
1049 switch (req_state) {
1050 case TEVENT_REQ_RECEIVED:
1052 * Make sure we really remove it from
1053 * the pending array on destruction.
1055 * smbXcli_req_unset_pending() calls
1056 * smbXcli_req_cancel_write_req() internal
1058 state->smb1.mid = 0;
1059 smbXcli_req_unset_pending(req);
1062 cancel_status = smbXcli_req_cancel_write_req(req);
1063 if (!NT_STATUS_IS_OK(cancel_status)) {
1065 * If the write_req cancel didn't work
1066 * we can't use the connection anymore.
1068 smbXcli_conn_disconnect(conn, cancel_status);
1075 static bool smb1cli_req_cancel(struct tevent_req *req);
1076 static bool smb2cli_req_cancel(struct tevent_req *req);
1078 static bool smbXcli_req_cancel(struct tevent_req *req)
1080 struct smbXcli_req_state *state =
1081 tevent_req_data(req,
1082 struct smbXcli_req_state);
1084 if (!smbXcli_conn_is_connected(state->conn)) {
1088 if (state->conn->protocol == PROTOCOL_NONE) {
1092 if (state->conn->protocol >= PROTOCOL_SMB2_02) {
1093 return smb2cli_req_cancel(req);
1096 return smb1cli_req_cancel(req);
1099 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
1101 bool smbXcli_req_set_pending(struct tevent_req *req)
1103 struct smbXcli_req_state *state =
1104 tevent_req_data(req,
1105 struct smbXcli_req_state);
1106 struct smbXcli_conn *conn;
1107 struct tevent_req **pending;
1112 if (!smbXcli_conn_is_connected(conn)) {
1116 num_pending = talloc_array_length(conn->pending);
1118 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
1120 if (pending == NULL) {
1123 pending[num_pending] = req;
1124 conn->pending = pending;
1125 tevent_req_set_cleanup_fn(req, smbXcli_req_cleanup);
1126 tevent_req_set_cancel_fn(req, smbXcli_req_cancel);
1128 if (!smbXcli_conn_receive_next(conn)) {
1130 * the caller should notify the current request
1132 * And all other pending requests get notified
1133 * by smbXcli_conn_disconnect().
1135 smbXcli_req_unset_pending(req);
1136 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
1143 static void smbXcli_conn_received(struct tevent_req *subreq);
1145 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
1147 size_t num_pending = talloc_array_length(conn->pending);
1148 struct tevent_req *req;
1149 struct smbXcli_req_state *state;
1151 if (conn->read_smb_req != NULL) {
1155 if (num_pending == 0) {
1156 if (conn->smb2.mid < UINT64_MAX) {
1157 /* no more pending requests, so we are done for now */
1162 * If there are no more SMB2 requests possible,
1163 * because we are out of message ids,
1164 * we need to disconnect.
1166 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
1170 req = conn->pending[0];
1171 state = tevent_req_data(req, struct smbXcli_req_state);
1174 * We're the first ones, add the read_smb request that waits for the
1175 * answer from the server
1177 conn->read_smb_req = read_smb_send(conn->pending,
1180 if (conn->read_smb_req == NULL) {
1183 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
1187 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
1189 struct smbXcli_session *session;
1190 int sock_fd = conn->sock_fd;
1192 tevent_queue_stop(conn->outgoing);
1196 session = conn->sessions;
1197 if (talloc_array_length(conn->pending) == 0) {
1199 * if we do not have pending requests
1200 * there is no need to update the channel_sequence
1204 for (; session; session = session->next) {
1205 smb2cli_session_increment_channel_sequence(session);
1208 if (conn->suicide_req != NULL) {
1210 * smbXcli_conn_samba_suicide_send()
1211 * used tevent_req_defer_callback() already.
1213 if (!NT_STATUS_IS_OK(status)) {
1214 tevent_req_nterror(conn->suicide_req, status);
1216 conn->suicide_req = NULL;
1220 * Cancel all pending requests. We do not do a for-loop walking
1221 * conn->pending because that array changes in
1222 * smbXcli_req_unset_pending.
1224 while (talloc_array_length(conn->pending) > 0) {
1225 struct tevent_req *req;
1226 struct smbXcli_req_state *state;
1227 struct tevent_req **chain;
1231 req = conn->pending[0];
1232 state = tevent_req_data(req, struct smbXcli_req_state);
1234 if (state->smb1.chained_requests == NULL) {
1238 * We're dead. No point waiting for trans2
1241 state->smb1.mid = 0;
1243 smbXcli_req_unset_pending(req);
1245 if (NT_STATUS_IS_OK(status)) {
1246 /* do not notify the callers */
1250 in_progress = tevent_req_is_in_progress(req);
1259 * we need to defer the callback, because we may notify
1260 * more then one caller.
1262 tevent_req_defer_callback(req, state->ev);
1263 tevent_req_nterror(req, status);
1267 chain = talloc_move(conn, &state->smb1.chained_requests);
1268 num_chained = talloc_array_length(chain);
1270 for (i=0; i<num_chained; i++) {
1274 state = tevent_req_data(req, struct smbXcli_req_state);
1277 * We're dead. No point waiting for trans2
1280 state->smb1.mid = 0;
1282 smbXcli_req_unset_pending(req);
1284 if (NT_STATUS_IS_OK(status)) {
1285 /* do not notify the callers */
1289 in_progress = tevent_req_is_in_progress(req);
1298 * we need to defer the callback, because we may notify
1299 * more than one caller.
1301 tevent_req_defer_callback(req, state->ev);
1302 tevent_req_nterror(req, status);
1307 if (sock_fd != -1) {
1313 * Fetch a smb request's mid. Only valid after the request has been sent by
1314 * smb1cli_req_send().
1316 uint16_t smb1cli_req_mid(struct tevent_req *req)
1318 struct smbXcli_req_state *state =
1319 tevent_req_data(req,
1320 struct smbXcli_req_state);
1322 if (state->smb1.mid != 0) {
1323 return state->smb1.mid;
1326 return SVAL(state->smb1.hdr, HDR_MID);
1329 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
1331 struct smbXcli_req_state *state =
1332 tevent_req_data(req,
1333 struct smbXcli_req_state);
1335 state->smb1.mid = mid;
1338 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
1340 struct smbXcli_req_state *state =
1341 tevent_req_data(req,
1342 struct smbXcli_req_state);
1344 return state->smb1.seqnum;
1347 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
1349 struct smbXcli_req_state *state =
1350 tevent_req_data(req,
1351 struct smbXcli_req_state);
1353 state->smb1.seqnum = seqnum;
1356 static size_t smbXcli_iov_len(const struct iovec *iov, int count)
1358 ssize_t ret = iov_buflen(iov, count);
1360 /* Ignore the overflow case for now ... */
1364 static void smb1cli_req_flags(enum protocol_types protocol,
1365 uint32_t smb1_capabilities,
1366 uint8_t smb_command,
1367 uint8_t additional_flags,
1368 uint8_t clear_flags,
1370 uint16_t additional_flags2,
1371 uint16_t clear_flags2,
1375 uint16_t flags2 = 0;
1377 if (protocol >= PROTOCOL_LANMAN1) {
1378 flags |= FLAG_CASELESS_PATHNAMES;
1379 flags |= FLAG_CANONICAL_PATHNAMES;
1382 if (protocol >= PROTOCOL_LANMAN2) {
1383 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
1384 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
1387 if (protocol >= PROTOCOL_NT1) {
1388 flags2 |= FLAGS2_IS_LONG_NAME;
1390 if (smb1_capabilities & CAP_UNICODE) {
1391 flags2 |= FLAGS2_UNICODE_STRINGS;
1393 if (smb1_capabilities & CAP_STATUS32) {
1394 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
1396 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
1397 flags2 |= FLAGS2_EXTENDED_SECURITY;
1401 flags |= additional_flags;
1402 flags &= ~clear_flags;
1403 flags2 |= additional_flags2;
1404 flags2 &= ~clear_flags2;
1410 static void smb1cli_req_cancel_done(struct tevent_req *subreq);
1412 static bool smb1cli_req_cancel(struct tevent_req *req)
1414 struct smbXcli_req_state *state =
1415 tevent_req_data(req,
1416 struct smbXcli_req_state);
1421 struct tevent_req *subreq;
1424 flags = CVAL(state->smb1.hdr, HDR_FLG);
1425 flags2 = SVAL(state->smb1.hdr, HDR_FLG2);
1426 pid = SVAL(state->smb1.hdr, HDR_PID);
1427 pid |= SVAL(state->smb1.hdr, HDR_PIDHIGH)<<16;
1428 mid = SVAL(state->smb1.hdr, HDR_MID);
1430 subreq = smb1cli_req_create(state, state->ev,
1440 0, NULL); /* bytes */
1441 if (subreq == NULL) {
1444 smb1cli_req_set_mid(subreq, mid);
1446 status = smb1cli_req_chain_submit(&subreq, 1);
1447 if (!NT_STATUS_IS_OK(status)) {
1448 TALLOC_FREE(subreq);
1451 smb1cli_req_set_mid(subreq, 0);
1453 tevent_req_set_callback(subreq, smb1cli_req_cancel_done, NULL);
/* Completion of an SMBntcancel; the response is intentionally ignored. */
static void smb1cli_req_cancel_done(struct tevent_req *subreq)
{
	/* we do not care about the result */
	TALLOC_FREE(subreq);
}
1464 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
1465 struct tevent_context *ev,
1466 struct smbXcli_conn *conn,
1467 uint8_t smb_command,
1468 uint8_t additional_flags,
1469 uint8_t clear_flags,
1470 uint16_t additional_flags2,
1471 uint16_t clear_flags2,
1472 uint32_t timeout_msec,
1474 struct smbXcli_tcon *tcon,
1475 struct smbXcli_session *session,
1476 uint8_t wct, uint16_t *vwv,
1478 struct iovec *bytes_iov)
1480 struct tevent_req *req;
1481 struct smbXcli_req_state *state;
1483 uint16_t flags2 = 0;
1488 if (iov_count > MAX_SMB_IOV) {
1490 * Should not happen :-)
1495 req = tevent_req_create(mem_ctx, &state,
1496 struct smbXcli_req_state);
1502 state->session = session;
1506 uid = session->smb1.session_id;
1510 tid = tcon->smb1.tcon_id;
1512 if (tcon->fs_attributes & FILE_CASE_SENSITIVE_SEARCH) {
1513 clear_flags |= FLAG_CASELESS_PATHNAMES;
1515 /* Default setting, case insensitive. */
1516 additional_flags |= FLAG_CASELESS_PATHNAMES;
1519 if (smbXcli_conn_dfs_supported(conn) &&
1520 smbXcli_tcon_is_dfs_share(tcon))
1522 additional_flags2 |= FLAGS2_DFS_PATHNAMES;
1526 state->smb1.recv_cmd = 0xFF;
1527 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
1528 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
1529 if (state->smb1.recv_iov == NULL) {
1534 smb1cli_req_flags(conn->protocol,
1535 conn->smb1.capabilities,
1544 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
1545 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
1546 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
1547 SCVAL(state->smb1.hdr, HDR_FLG, flags);
1548 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
1549 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
1550 SSVAL(state->smb1.hdr, HDR_TID, tid);
1551 SSVAL(state->smb1.hdr, HDR_PID, pid);
1552 SSVAL(state->smb1.hdr, HDR_UID, uid);
1553 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
1554 SCVAL(state->smb1.hdr, HDR_WCT, wct);
1556 state->smb1.vwv = vwv;
1558 num_bytes = iov_buflen(bytes_iov, iov_count);
1559 if (num_bytes == -1) {
1561 * I'd love to add a check for num_bytes<=UINT16_MAX here, but
1562 * the smbclient->samba connections can lie and transfer more.
1568 SSVAL(state->smb1.bytecount_buf, 0, num_bytes);
1570 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
1571 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
1572 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
1573 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
1574 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
1575 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
1576 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
1577 state->smb1.iov[3].iov_len = sizeof(uint16_t);
1579 if (iov_count != 0) {
1580 memcpy(&state->smb1.iov[4], bytes_iov,
1581 iov_count * sizeof(*bytes_iov));
1583 state->smb1.iov_count = iov_count + 4;
1585 if (timeout_msec > 0) {
1586 struct timeval endtime;
1588 endtime = timeval_current_ofs_msec(timeout_msec);
1589 if (!tevent_req_set_endtime(req, ev, endtime)) {
1594 switch (smb_command) {
1598 state->one_way = true;
1601 state->one_way = true;
1602 state->smb1.one_way_seqnum = true;
1606 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
1607 state->one_way = true;
/*
 * Sign an outgoing SMB1 PDU held in an iovec array.
 *
 * Expected layout: iov[0] = 4-byte NBT length header, iov[1] = SMB header
 * (without the length field), iov[2] = vwv words, iov[3] = 2-byte byte
 * count; further elements carry the data bytes.  The PDU (without the NBT
 * header) is linearized, signed, and the signed header is copied back into
 * iov[1].  On success *seqnum receives the signing sequence number used
 * for this request.
 *
 * Returns NT_STATUS_INVALID_PARAMETER_MIX on a malformed iovec layout,
 * NT_STATUS_NO_MEMORY on allocation failure, NT_STATUS_OK otherwise.
 */
1615 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
1616 struct iovec *iov, int iov_count,
1618 bool one_way_seqnum)
1620 TALLOC_CTX *frame = NULL;
1624 * Obvious optimization: Make cli_calculate_sign_mac work with struct
1625 * iovec directly. MD5Update would do that just fine.
1628 if (iov_count < 4) {
1629 return NT_STATUS_INVALID_PARAMETER_MIX;
1631 if (iov[0].iov_len != NBT_HDR_SIZE) {
1632 return NT_STATUS_INVALID_PARAMETER_MIX;
1634 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1635 return NT_STATUS_INVALID_PARAMETER_MIX;
1637 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1638 return NT_STATUS_INVALID_PARAMETER_MIX;
1640 if (iov[3].iov_len != sizeof(uint16_t)) {
1641 return NT_STATUS_INVALID_PARAMETER_MIX;
1644 frame = talloc_stackframe(); /* holds the temporary linear copy */
1646 buf = iov_concat(frame, &iov[1], iov_count - 1); /* skip the NBT header */
1648 return NT_STATUS_NO_MEMORY;
1651 *seqnum = smb_signing_next_seqnum(conn->smb1.signing,
1653 smb_signing_sign_pdu(conn->smb1.signing,
1654 buf, talloc_get_size(buf),
1656 memcpy(iov[1].iov_base, buf, iov[1].iov_len); /* copy signed header back */
1659 return NT_STATUS_OK;
1662 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1663 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1664 TALLOC_CTX *tmp_mem,
/*
 * Push a fully marshalled SMB1 request onto the wire.
 *
 * Validates the iovec layout (NBT header, SMB header, vwv, byte count),
 * assigns a MID, fills in the NBT length, signs the PDU, optionally
 * encrypts it (smb1 trans_enc), registers the request as pending and
 * starts the async writev on the connection's outgoing queue.
 *
 * Fails with NT_STATUS_REVISION_MISMATCH when called on an SMB2+
 * connection and NT_STATUS_CONNECTION_DISCONNECTED when the socket is
 * gone.  NT_STATUS_OK only means the writev was queued; completion is
 * reported via smb1cli_req_writev_done().
 */
1667 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1668 struct smbXcli_req_state *state,
1669 struct iovec *iov, int iov_count)
1671 struct tevent_req *subreq;
1677 if (!smbXcli_conn_is_connected(state->conn)) {
1678 return NT_STATUS_CONNECTION_DISCONNECTED;
1681 if (state->conn->protocol > PROTOCOL_NT1) {
1682 DBG_ERR("called for dialect[%s] server[%s]\n",
1683 smb_protocol_types_string(state->conn->protocol),
1684 smbXcli_conn_remote_name(state->conn));
1685 return NT_STATUS_REVISION_MISMATCH;
1688 if (iov_count < 4) {
1689 return NT_STATUS_INVALID_PARAMETER_MIX;
1691 if (iov[0].iov_len != NBT_HDR_SIZE) {
1692 return NT_STATUS_INVALID_PARAMETER_MIX;
1694 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1695 return NT_STATUS_INVALID_PARAMETER_MIX;
1697 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1698 return NT_STATUS_INVALID_PARAMETER_MIX;
1700 if (iov[3].iov_len != sizeof(uint16_t)) {
1701 return NT_STATUS_INVALID_PARAMETER_MIX;
1704 cmd = CVAL(iov[1].iov_base, HDR_COM);
1705 if (cmd == SMBreadBraw) {
1706 if (smbXcli_conn_has_async_calls(state->conn)) {
1707 return NT_STATUS_INVALID_PARAMETER_MIX;
1709 state->conn->smb1.read_braw_req = req; /* raw read replies carry no header */
1712 if (state->smb1.mid != 0) {
1713 mid = state->smb1.mid; /* caller-forced MID */
1715 mid = smb1cli_alloc_mid(state->conn);
1717 SSVAL(iov[1].iov_base, HDR_MID, mid);
1719 nbtlen = iov_buflen(&iov[1], iov_count-1);
1720 if ((nbtlen == -1) || (nbtlen > 0x1FFFF)) { /* NBT length field limit */
1721 return NT_STATUS_INVALID_PARAMETER_MIX;
1724 _smb_setlen_nbt(iov[0].iov_base, nbtlen);
1726 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1727 &state->smb1.seqnum,
1728 state->smb1.one_way_seqnum);
1730 if (!NT_STATUS_IS_OK(status)) {
1735 * If we supported multiple encrytion contexts
1736 * here we'd look up based on tid.
1738 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1739 char *buf, *enc_buf;
1741 buf = (char *)iov_concat(talloc_tos(), iov, iov_count);
1743 return NT_STATUS_NO_MEMORY;
1745 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1746 (char *)buf, &enc_buf);
1748 if (!NT_STATUS_IS_OK(status)) {
1749 DEBUG(0, ("Error in encrypting client message: %s\n",
1750 nt_errstr(status)));
1753 buf = (char *)talloc_memdup(state, enc_buf,
1754 smb_len_nbt(enc_buf)+4);
1757 return NT_STATUS_NO_MEMORY;
1759 iov[0].iov_base = (void *)buf; /* send the encrypted copy instead */
1760 iov[0].iov_len = talloc_get_size(buf);
1764 if (state->conn->dispatch_incoming == NULL) {
1765 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1768 if (!smbXcli_req_set_pending(req)) {
1769 return NT_STATUS_NO_MEMORY;
1772 tevent_req_set_cancel_fn(req, smbXcli_req_cancel);
1774 subreq = writev_send(state, state->ev, state->conn->outgoing,
1775 state->conn->sock_fd, false, iov, iov_count);
1776 if (subreq == NULL) {
1777 return NT_STATUS_NO_MEMORY;
1779 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1780 state->write_req = subreq;
1782 return NT_STATUS_OK;
/*
 * Create and immediately submit a single (unchained) SMB1 request.
 *
 * Thin convenience wrapper: builds the request via smb1cli_req_create()
 * and submits it as a one-element chain via smb1cli_req_chain_submit().
 * The bytes buffer is wrapped into a single iovec; ownership is not
 * transferred (hence discard_const_p).  Returns a tevent_req, already
 * posted on error paths so the caller's callback always fires.
 */
1785 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1786 struct tevent_context *ev,
1787 struct smbXcli_conn *conn,
1788 uint8_t smb_command,
1789 uint8_t additional_flags,
1790 uint8_t clear_flags,
1791 uint16_t additional_flags2,
1792 uint16_t clear_flags2,
1793 uint32_t timeout_msec,
1795 struct smbXcli_tcon *tcon,
1796 struct smbXcli_session *session,
1797 uint8_t wct, uint16_t *vwv,
1799 const uint8_t *bytes)
1801 struct tevent_req *req;
1805 iov.iov_base = discard_const_p(void, bytes);
1806 iov.iov_len = num_bytes;
1808 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1809 additional_flags, clear_flags,
1810 additional_flags2, clear_flags2,
1817 if (!tevent_req_is_in_progress(req)) {
1818 return tevent_req_post(req, ev); /* create already failed */
1820 status = smb1cli_req_chain_submit(&req, 1);
1821 if (tevent_req_nterror(req, status)) {
1822 return tevent_req_post(req, ev);
/*
 * Completion callback for the writev of an SMB1 request.
 *
 * A write failure tears down the whole connection (which fails every
 * pending request with the mapped NTSTATUS).  One-way requests (no reply
 * expected) are completed here immediately; all others wait for the
 * incoming dispatch path.
 */
1827 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1829 struct tevent_req *req =
1830 tevent_req_callback_data(subreq,
1832 struct smbXcli_req_state *state =
1833 tevent_req_data(req,
1834 struct smbXcli_req_state);
1838 state->write_req = NULL;
1840 nwritten = writev_recv(subreq, &err);
1841 TALLOC_FREE(subreq);
1842 if (nwritten == -1) {
1843 /* here, we need to notify all pending requests */
1844 NTSTATUS status = map_nt_error_from_unix_common(err);
1845 smbXcli_conn_disconnect(state->conn, status);
1849 if (state->one_way) {
1850 state->inbuf = NULL;
1851 tevent_req_done(req);
/*
 * Completion callback for read_smb: one full PDU has arrived.
 *
 * Hands the buffer to conn->dispatch_incoming and interprets its result
 * per the contract documented at the top of this file:
 *   NT_STATUS_OK    - done, no further processing here
 *   NT_STATUS_RETRY - more PDUs expected, re-arm the read
 *   anything else   - fatal, disconnect (fails all pending requests)
 */
1856 static void smbXcli_conn_received(struct tevent_req *subreq)
1858 struct smbXcli_conn *conn =
1859 tevent_req_callback_data(subreq,
1860 struct smbXcli_conn);
1861 TALLOC_CTX *frame = talloc_stackframe();
1867 if (subreq != conn->read_smb_req) {
1868 DEBUG(1, ("Internal error: cli_smb_received called with "
1869 "unexpected subreq\n"));
1870 smbXcli_conn_disconnect(conn, NT_STATUS_INTERNAL_ERROR);
1874 conn->read_smb_req = NULL;
1876 received = read_smb_recv(subreq, frame, &inbuf, &err);
1877 TALLOC_FREE(subreq);
1878 if (received == -1) {
1879 status = map_nt_error_from_unix_common(err);
1880 smbXcli_conn_disconnect(conn, status);
1885 status = conn->dispatch_incoming(conn, frame, inbuf);
1887 if (NT_STATUS_IS_OK(status)) {
1889 * We should not do any more processing
1890 * as the dispatch function called
1891 * tevent_req_done().
1896 if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1898 * We got an error, so notify all pending requests
1900 smbXcli_conn_disconnect(conn, status);
1905 * We got NT_STATUS_RETRY, so we may ask for a
1906 * next incoming pdu.
1908 if (!smbXcli_conn_receive_next(conn)) {
1909 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
/*
 * Split an incoming SMB1 reply (possibly an AndX chain) into iovecs.
 *
 * buf points at the raw PDU including the 4-byte NBT/TCP length header.
 * On success *piov/*pnum_iov describe the buffer as:
 *   iov[0]             - SMB header up to (not including) wct
 *   iov[1+2*i]         - vwv words of chained reply i
 *   iov[2+2*i]         - data bytes of chained reply i
 * so *pnum_iov is always 1 + 2*num_responses.
 *
 * Contains an explicit workaround for OS/2 servers that omit the
 * byte_count field on some error responses (see the two "ugly hack"
 * comments below).  Any structural inconsistency yields
 * NT_STATUS_INVALID_NETWORK_RESPONSE.
 */
1913 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1914 struct iovec **piov, int *pnum_iov)
1925 size_t min_size = MIN_SMB_SIZE;
1927 buflen = smb_len_tcp(buf);
1930 hdr = buf + NBT_HDR_SIZE;
1932 status = smb1cli_pull_raw_error(hdr);
1933 if (NT_STATUS_IS_ERR(status)) {
1935 * This is an ugly hack to support OS/2
1936 * which skips the byte_count in the DATA block
1937 * on some error responses.
1941 min_size -= sizeof(uint16_t);
1944 if (buflen < min_size) {
1945 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1949 * This returns iovec elements in the following order:
1964 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1966 return NT_STATUS_NO_MEMORY;
1968 iov[0].iov_base = hdr;
1969 iov[0].iov_len = HDR_WCT; /* header up to, not including, wct */
1972 cmd = CVAL(hdr, HDR_COM);
1976 size_t len = buflen - taken;
1978 struct iovec *iov_tmp;
1985 * we need at least WCT
1987 needed = sizeof(uint8_t);
1989 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1990 __location__, (int)len, (int)needed));
1995 * Now we check if the specified words are there
1997 wct = CVAL(hdr, wct_ofs);
1998 needed += wct * sizeof(uint16_t);
2000 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
2001 __location__, (int)len, (int)needed));
2005 if ((num_iov == 1) &&
2007 NT_STATUS_IS_ERR(status))
2010 * This is an ugly hack to support OS/2
2011 * which skips the byte_count in the DATA block
2012 * on some error responses.
2016 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2018 if (iov_tmp == NULL) {
2020 return NT_STATUS_NO_MEMORY;
2023 cur = &iov[num_iov];
2027 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
2029 cur[1].iov_base = cur[0].iov_base; /* empty DATA block */
2036 * we need at least BCC
2038 needed += sizeof(uint16_t);
2040 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
2041 __location__, (int)len, (int)needed));
2046 * Now we check if the specified bytes are there
2048 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
2049 bcc = SVAL(hdr, bcc_ofs);
2050 needed += bcc * sizeof(uint8_t);
2052 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
2053 __location__, (int)len, (int)needed));
2058 * we allocate 2 iovec structures for words and bytes
2060 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2062 if (iov_tmp == NULL) {
2064 return NT_STATUS_NO_MEMORY;
2067 cur = &iov[num_iov];
2070 cur[0].iov_len = wct * sizeof(uint16_t);
2071 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
2072 cur[1].iov_len = bcc * sizeof(uint8_t);
2073 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
2077 if (!smb1cli_is_andx_req(cmd)) {
2079 * If the current command does not have AndX chanining
2085 if (wct == 0 && bcc == 0) {
2087 * An empty response also ends the chain,
2088 * most likely with an error.
2094 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
2095 __location__, (int)wct, (int)cmd));
2098 cmd = CVAL(cur[0].iov_base, 0); /* AndX: next command in vwv[0] */
2101 * If it is the end of the chain we are also done.
2105 wct_ofs = SVAL(cur[0].iov_base, 2); /* AndX: next wct offset in vwv[1] */
2107 if (wct_ofs < taken) {
2108 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2110 if (wct_ofs > buflen) {
2111 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2115 * we consumed everything up to the start of the next
2121 remaining = buflen - taken;
2123 if (remaining > 0 && num_iov >= 3) {
2125 * The last DATA block gets the remaining
2126 * bytes, this is needed to support
2127 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
2129 iov[num_iov-1].iov_len += remaining;
2133 *pnum_iov = num_iov;
2134 return NT_STATUS_OK;
2138 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Dispatch one incoming SMB1 PDU to the pending request it answers.
 *
 * Handles, in order:
 *  - a pending SMBreadBraw (raw reply, no SMB header at all),
 *  - optional transport decryption (smb1 trans_enc),
 *  - MID lookup in conn->pending (with paranoia checks for MID 0xffff
 *    oplock break requests coming *from* the server),
 *  - signature verification (oplock breaks are not signed),
 *  - parsing the (possibly AndX-chained) reply and completing either the
 *    single request or every request of the submitted chain.
 *
 * Returns NT_STATUS_OK when processing is complete, NT_STATUS_RETRY when
 * the caller should keep reading (unexpected PDU dumped, or deferred
 * callbacks pending), other codes to tear the connection down — matching
 * the dispatch contract in the file header.
 */
2141 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2142 TALLOC_CTX *tmp_mem,
2145 struct tevent_req *req;
2146 struct smbXcli_req_state *state;
2153 uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
2154 size_t len = smb_len_tcp(inbuf);
2155 struct iovec *iov = NULL;
2157 struct tevent_req **chain = NULL;
2158 size_t num_chained = 0;
2159 size_t num_responses = 0;
2161 if (conn->smb1.read_braw_req != NULL) {
2162 req = conn->smb1.read_braw_req;
2163 conn->smb1.read_braw_req = NULL;
2164 state = tevent_req_data(req, struct smbXcli_req_state);
2166 smbXcli_req_unset_pending(req);
2168 if (state->smb1.recv_iov == NULL) {
2170 * For requests with more than
2171 * one response, we have to readd the
2174 state->smb1.recv_iov = talloc_zero_array(state,
2177 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
2178 return NT_STATUS_OK;
2182 state->smb1.recv_iov[0].iov_base = (void *)(inhdr);
2183 state->smb1.recv_iov[0].iov_len = len;
2184 ZERO_STRUCT(state->smb1.recv_iov[1]);
2185 ZERO_STRUCT(state->smb1.recv_iov[2]);
2187 state->smb1.recv_cmd = SMBreadBraw;
2188 state->smb1.recv_status = NT_STATUS_OK;
2189 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
2191 tevent_req_done(req);
2192 return NT_STATUS_OK;
2195 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
2196 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
2197 DEBUG(10, ("Got non-SMB PDU\n"));
2198 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2202 * If we supported multiple encrytion contexts
2203 * here we'd look up based on tid.
2205 if (common_encryption_on(conn->smb1.trans_enc)
2206 && (CVAL(inbuf, 0) == 0)) {
2207 uint16_t enc_ctx_num;
2209 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
2210 if (!NT_STATUS_IS_OK(status)) {
2211 DEBUG(10, ("get_enc_ctx_num returned %s\n",
2212 nt_errstr(status)));
2216 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
2217 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
2219 conn->smb1.trans_enc->enc_ctx_num));
2220 return NT_STATUS_INVALID_HANDLE;
2223 status = common_decrypt_buffer(conn->smb1.trans_enc,
2225 if (!NT_STATUS_IS_OK(status)) {
2226 DEBUG(10, ("common_decrypt_buffer returned %s\n",
2227 nt_errstr(status)));
2230 inhdr = inbuf + NBT_HDR_SIZE; /* buffer may have been replaced */
2231 len = smb_len_nbt(inbuf);
2234 mid = SVAL(inhdr, HDR_MID);
2235 num_pending = talloc_array_length(conn->pending);
2237 for (i=0; i<num_pending; i++) {
2238 if (mid == smb1cli_req_mid(conn->pending[i])) {
2242 if (i == num_pending) {
2243 /* Dump unexpected reply */
2244 return NT_STATUS_RETRY;
2247 oplock_break = false;
2249 if (mid == 0xffff) {
2251 * Paranoia checks that this is really an oplock break request.
2253 oplock_break = (len == 51); /* hdr + 8 words */
2254 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
2255 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
2256 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
2257 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
2259 if (!oplock_break) {
2260 /* Dump unexpected reply */
2261 return NT_STATUS_RETRY;
2265 req = conn->pending[i];
2266 state = tevent_req_data(req, struct smbXcli_req_state);
2268 if (!oplock_break /* oplock breaks are not signed */
2269 && !smb_signing_check_pdu(conn->smb1.signing,
2270 inhdr, len, state->smb1.seqnum+1)) {
2271 DEBUG(10, ("cli_check_sign_mac failed\n"));
2272 return NT_STATUS_ACCESS_DENIED;
2275 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
2277 if (!NT_STATUS_IS_OK(status)) {
2278 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
2279 nt_errstr(status)));
2283 cmd = CVAL(inhdr, HDR_COM);
2284 status = smb1cli_pull_raw_error(inhdr);
2286 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED) &&
2287 (state->session != NULL) && state->session->disconnect_expired)
2290 * this should be a short term hack
2291 * until the upper layers have implemented
2292 * re-authentication.
2297 if (state->smb1.chained_requests == NULL) {
2299 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2302 smbXcli_req_unset_pending(req);
2304 if (state->smb1.recv_iov == NULL) {
2306 * For requests with more than
2307 * one response, we have to readd the
2310 state->smb1.recv_iov = talloc_zero_array(state,
2313 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
2314 return NT_STATUS_OK;
2318 state->smb1.recv_cmd = cmd;
2319 state->smb1.recv_status = status;
2320 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
2322 state->smb1.recv_iov[0] = iov[0];
2323 state->smb1.recv_iov[1] = iov[1];
2324 state->smb1.recv_iov[2] = iov[2];
2326 if (talloc_array_length(conn->pending) == 0) {
2327 tevent_req_done(req);
2328 return NT_STATUS_OK;
2331 tevent_req_defer_callback(req, state->ev);
2332 tevent_req_done(req);
2333 return NT_STATUS_RETRY; /* other requests still pending */
2336 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
2337 num_chained = talloc_array_length(chain);
2338 num_responses = (num_iov - 1)/2; /* (vwv,bytes) pair per response */
2340 if (num_responses > num_chained) {
2341 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2344 for (i=0; i<num_chained; i++) {
2345 size_t iov_idx = 1 + (i*2);
2346 struct iovec *cur = &iov[iov_idx];
2350 state = tevent_req_data(req, struct smbXcli_req_state);
2352 smbXcli_req_unset_pending(req);
2355 * as we finish multiple requests here
2356 * we need to defer the callbacks as
2357 * they could destroy our current stack state.
2359 tevent_req_defer_callback(req, state->ev);
2361 if (i >= num_responses) {
2362 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
2366 if (state->smb1.recv_iov == NULL) {
2368 * For requests with more than
2369 * one response, we have to readd the
2372 state->smb1.recv_iov = talloc_zero_array(state,
2375 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
2380 state->smb1.recv_cmd = cmd;
2382 if (i == (num_responses - 1)) {
2384 * The last request in the chain gets the status
2386 state->smb1.recv_status = status;
2388 cmd = CVAL(cur[0].iov_base, 0); /* next AndX command */
2389 state->smb1.recv_status = NT_STATUS_OK;
2392 state->inbuf = inbuf;
2395 * Note: here we use talloc_reference() in a way
2396 * that does not expose it to the caller.
2398 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
2399 if (tevent_req_nomem(inbuf_ref, req)) {
2403 /* copy the related buffers */
2404 state->smb1.recv_iov[0] = iov[0];
2405 state->smb1.recv_iov[1] = cur[0];
2406 state->smb1.recv_iov[2] = cur[1];
2408 tevent_req_done(req);
2411 return NT_STATUS_RETRY;
/*
 * Receive the result of an SMB1 request.
 *
 * Unpacks the reply stored by the dispatch path into header/vwv/bytes
 * views and validates the server status and word count against the
 * caller-supplied table of expected (status, wct) combinations; an empty
 * table (num_expected == 0) accepts anything.  All out parameters are
 * optional (NULL to ignore).  On a tevent-level error, only errors listed
 * in the expected table are passed through; any other error is reported
 * as NT_STATUS_UNEXPECTED_NETWORK_ERROR.  An unexpected but successful
 * reply yields NT_STATUS_INVALID_NETWORK_RESPONSE.
 */
2414 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
2415 TALLOC_CTX *mem_ctx,
2416 struct iovec **piov,
2420 uint32_t *pvwv_offset,
2421 uint32_t *pnum_bytes,
2423 uint32_t *pbytes_offset,
2425 const struct smb1cli_req_expected_response *expected,
2426 size_t num_expected)
2428 struct smbXcli_req_state *state =
2429 tevent_req_data(req,
2430 struct smbXcli_req_state);
2431 NTSTATUS status = NT_STATUS_OK;
2432 struct iovec *recv_iov = NULL;
2433 uint8_t *hdr = NULL;
2435 uint32_t vwv_offset = 0;
2436 uint16_t *vwv = NULL;
2437 uint32_t num_bytes = 0;
2438 uint32_t bytes_offset = 0;
2439 uint8_t *bytes = NULL;
2441 bool found_status = false;
2442 bool found_size = false;
2456 if (pvwv_offset != NULL) {
2459 if (pnum_bytes != NULL) {
2462 if (pbytes != NULL) {
2465 if (pbytes_offset != NULL) {
2468 if (pinbuf != NULL) {
2472 if (state->inbuf != NULL) {
2473 recv_iov = state->smb1.recv_iov;
2474 state->smb1.recv_iov = NULL; /* transfer to this receive call */
2475 if (state->smb1.recv_cmd != SMBreadBraw) {
2476 hdr = (uint8_t *)recv_iov[0].iov_base;
2477 wct = recv_iov[1].iov_len/2;
2478 vwv = (uint16_t *)recv_iov[1].iov_base;
2479 vwv_offset = PTR_DIFF(vwv, hdr);
2480 num_bytes = recv_iov[2].iov_len;
2481 bytes = (uint8_t *)recv_iov[2].iov_base;
2482 bytes_offset = PTR_DIFF(bytes, hdr);
2486 if (tevent_req_is_nterror(req, &status)) {
2487 for (i=0; i < num_expected; i++) {
2488 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2489 found_status = true;
2495 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2501 if (num_expected == 0) {
2502 found_status = true; /* caller accepts anything */
2506 status = state->smb1.recv_status;
2508 for (i=0; i < num_expected; i++) {
2509 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2513 found_status = true;
2514 if (expected[i].wct == 0) {
2519 if (expected[i].wct == wct) {
2525 if (!found_status) {
2530 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2534 *piov = talloc_move(mem_ctx, &recv_iov);
2546 if (pvwv_offset != NULL) {
2547 *pvwv_offset = vwv_offset;
2549 if (pnum_bytes != NULL) {
2550 *pnum_bytes = num_bytes;
2552 if (pbytes != NULL) {
2555 if (pbytes_offset != NULL) {
2556 *pbytes_offset = bytes_offset;
2558 if (pinbuf != NULL) {
2559 *pinbuf = state->inbuf;
/*
 * Compute the wct offset a request appended after the given num_reqs
 * chained requests would get: the sum of each request's words+bytes
 * length, rounded up to the next 4-byte boundary after every request.
 */
2565 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
2572 for (i=0; i<num_reqs; i++) {
2573 struct smbXcli_req_state *state;
2574 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2575 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
2576 state->smb1.iov_count-2);
2577 wct_ofs = (wct_ofs + 3) & ~3; /* 4-byte align per chained request */
/*
 * Submit one or more SMB1 requests as a single AndX chain.
 *
 * With num_reqs == 1 this degenerates to a plain submit.  Otherwise the
 * individual requests' iovecs are merged into one PDU: only the first
 * request contributes the NBT and SMB headers; for every follow-up
 * request the predecessor's AndX vwv is patched with the next command
 * and its 4-byte-aligned wct offset, and the padding bytes are taken
 * from the tail of the next request's (unsent) header buffer.  The
 * resulting chain is recorded in first_state->smb1.chained_requests so
 * the dispatch path can complete all requests from the single reply.
 *
 * All requests must be AndX-capable (except the last) and in progress;
 * the combined PDU must not exceed the negotiated max_xmit.
 */
2582 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
2584 struct smbXcli_req_state *first_state =
2585 tevent_req_data(reqs[0],
2586 struct smbXcli_req_state);
2587 struct smbXcli_req_state *state;
2589 size_t chain_padding = 0;
2591 struct iovec *iov = NULL;
2592 struct iovec *this_iov;
2596 if (num_reqs == 1) {
2597 return smb1cli_req_writev_submit(reqs[0], first_state,
2598 first_state->smb1.iov,
2599 first_state->smb1.iov_count);
2603 for (i=0; i<num_reqs; i++) {
2604 if (!tevent_req_is_in_progress(reqs[i])) {
2605 return NT_STATUS_INTERNAL_ERROR;
2608 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2610 if (state->smb1.iov_count < 4) {
2611 return NT_STATUS_INVALID_PARAMETER_MIX;
2616 * The NBT and SMB header
2629 iovlen += state->smb1.iov_count - 2; /* words + bytes only */
2632 iov = talloc_zero_array(first_state, struct iovec, iovlen);
2634 return NT_STATUS_NO_MEMORY;
2637 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
2638 first_state, reqs, sizeof(*reqs) * num_reqs);
2639 if (first_state->smb1.chained_requests == NULL) {
2641 return NT_STATUS_NO_MEMORY;
2644 wct_offset = HDR_WCT;
2647 for (i=0; i<num_reqs; i++) {
2648 size_t next_padding = 0;
2651 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2653 if (i < num_reqs-1) {
2654 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
2655 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
2657 TALLOC_FREE(first_state->smb1.chained_requests);
2658 return NT_STATUS_INVALID_PARAMETER_MIX;
2662 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
2663 state->smb1.iov_count-2) + 1;
2664 if ((wct_offset % 4) != 0) {
2665 next_padding = 4 - (wct_offset % 4);
2667 wct_offset += next_padding;
2668 vwv = state->smb1.vwv;
2670 if (i < num_reqs-1) {
2671 struct smbXcli_req_state *next_state =
2672 tevent_req_data(reqs[i+1],
2673 struct smbXcli_req_state);
2674 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
2676 SSVAL(vwv+1, 0, wct_offset); /* AndX offset of next request */
2677 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
2678 /* properly end the chain */
2679 SCVAL(vwv+0, 0, 0xff);
2680 SCVAL(vwv+0, 1, 0xff);
2686 * The NBT and SMB header
2688 this_iov[0] = state->smb1.iov[0];
2689 this_iov[1] = state->smb1.iov[1];
2693 * This one is a bit subtle. We have to add
2694 * chain_padding bytes between the requests, and we
2695 * have to also include the wct field of the
2696 * subsequent requests. We use the subsequent header
2697 * for the padding, it contains the wct field in its
2700 this_iov[0].iov_len = chain_padding+1;
2701 this_iov[0].iov_base = (void *)&state->smb1.hdr[
2702 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
2703 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
2708 * copy the words and bytes
2710 memcpy(this_iov, state->smb1.iov+2,
2711 sizeof(struct iovec) * (state->smb1.iov_count-2));
2712 this_iov += state->smb1.iov_count - 2;
2713 chain_padding = next_padding;
2716 nbt_len = iov_buflen(&iov[1], iovlen-1);
2717 if ((nbt_len == -1) || (nbt_len > first_state->conn->smb1.max_xmit)) {
2719 TALLOC_FREE(first_state->smb1.chained_requests);
2720 return NT_STATUS_INVALID_PARAMETER_MIX;
2723 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
2724 if (!NT_STATUS_IS_OK(status)) {
2726 TALLOC_FREE(first_state->smb1.chained_requests);
2730 return NT_STATUS_OK;
/*
 * True while any request is queued for sending or awaiting a reply.
 */
2733 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
2735 return ((tevent_queue_length(conn->outgoing) != 0)
2736 || (talloc_array_length(conn->pending) != 0));
/*
 * Whether the server advertised DFS support, checking the capability
 * bit appropriate for the negotiated protocol family (SMB1 vs SMB2+).
 */
2739 bool smbXcli_conn_dfs_supported(struct smbXcli_conn *conn)
2741 if (conn->protocol >= PROTOCOL_SMB2_02) {
2742 return (smb2cli_conn_server_capabilities(conn) & SMB2_CAP_DFS);
2745 return (smb1cli_conn_capabilities(conn) & CAP_DFS);
/*
 * Whether an SMB2 request can be sent right now (credits available).
 * Optionally reports the maximum dynamic payload via *max_dyn_len:
 * 64 KiB per available credit with SMB2_CAP_LARGE_MTU, otherwise the
 * single-credit value.
 */
2748 bool smb2cli_conn_req_possible(struct smbXcli_conn *conn, uint32_t *max_dyn_len)
2750 uint16_t credits = 1;
2752 if (conn->smb2.cur_credits == 0) {
2753 if (max_dyn_len != NULL) {
2759 if (conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2760 credits = conn->smb2.cur_credits;
2763 if (max_dyn_len != NULL) {
2764 *max_dyn_len = credits * 65536; /* 64 KiB per credit */
/* Server capability flags from SMB2 negotiate. */
2770 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
2772 return conn->smb2.server.capabilities;
/* Server security mode flags from SMB2 negotiate. */
2775 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
2777 return conn->smb2.server.security_mode;
/* Negotiated maximum SMB2 transaction size. */
2780 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
2782 return conn->smb2.server.max_trans_size;
/* Negotiated maximum SMB2 read size. */
2785 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
2787 return conn->smb2.server.max_read_size;
/* Negotiated maximum SMB2 write size. */
2790 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
2792 return conn->smb2.server.max_write_size;
/* Set the credit target this client asks the server for. */
2795 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
2796 uint16_t max_credits)
2798 conn->smb2.max_credits = max_credits;
/* Credits currently granted by the server and not yet consumed. */
2801 uint16_t smb2cli_conn_get_cur_credits(struct smbXcli_conn *conn)
2803 return conn->smb2.cur_credits;
/* I/O priority hint; only meaningful on SMB 3.1.1 and later. */
2806 uint8_t smb2cli_conn_get_io_priority(struct smbXcli_conn *conn)
2808 if (conn->protocol < PROTOCOL_SMB3_11) {
2812 return conn->smb2.io_priority;
/* Set the SMB 3.1.1 I/O priority hint applied to new requests. */
2815 void smb2cli_conn_set_io_priority(struct smbXcli_conn *conn,
2816 uint8_t io_priority)
2818 conn->smb2.io_priority = io_priority;
/* Server-side copy-chunk: chunk length. */
2821 uint32_t smb2cli_conn_cc_chunk_len(struct smbXcli_conn *conn)
2823 return conn->smb2.cc_chunk_len;
/* Server-side copy-chunk: set the chunk length. */
2826 void smb2cli_conn_set_cc_chunk_len(struct smbXcli_conn *conn,
2829 conn->smb2.cc_chunk_len = chunk_len;
/* Server-side copy-chunk: maximum number of chunks per request. */
2832 uint32_t smb2cli_conn_cc_max_chunks(struct smbXcli_conn *conn)
2834 return conn->smb2.cc_max_chunks;
/* Server-side copy-chunk: set the maximum number of chunks. */
2837 void smb2cli_conn_set_cc_max_chunks(struct smbXcli_conn *conn,
2838 uint32_t max_chunks)
2840 conn->smb2.cc_max_chunks = max_chunks;
2843 static void smb2cli_req_cancel_done(struct tevent_req *subreq);
/*
 * tevent cancel function: send an SMB2 CANCEL for the given request.
 *
 * Builds a minimal CANCEL request (4-byte fixed body, reusing the
 * original request's pad buffer), copies the original's flags, message
 * id and async id into the new header, and fires it without caring
 * about the outcome (callback just frees the subreq).
 */
2845 static bool smb2cli_req_cancel(struct tevent_req *req)
2847 struct smbXcli_req_state *state =
2848 tevent_req_data(req,
2849 struct smbXcli_req_state);
2850 struct smbXcli_tcon *tcon = state->tcon;
2851 struct smbXcli_session *session = state->session;
2852 uint8_t *fixed = state->smb2.pad;
2853 uint16_t fixed_len = 4;
2854 struct tevent_req *subreq;
2855 struct smbXcli_req_state *substate;
2858 SSVAL(fixed, 0, 0x04); /* CANCEL fixed body size */
2861 subreq = smb2cli_req_create(state, state->ev,
2869 if (subreq == NULL) {
2872 substate = tevent_req_data(subreq, struct smbXcli_req_state);
2874 SIVAL(substate->smb2.hdr, SMB2_HDR_FLAGS, state->smb2.cancel_flags);
2875 SBVAL(substate->smb2.hdr, SMB2_HDR_MESSAGE_ID, state->smb2.cancel_mid);
2876 SBVAL(substate->smb2.hdr, SMB2_HDR_ASYNC_ID, state->smb2.cancel_aid);
2878 status = smb2cli_req_compound_submit(&subreq, 1);
2879 if (!NT_STATUS_IS_OK(status)) {
2880 TALLOC_FREE(subreq);
2884 tevent_req_set_callback(subreq, smb2cli_req_cancel_done, NULL);
/* Completion of the CANCEL send: intentionally fire-and-forget. */
2889 static void smb2cli_req_cancel_done(struct tevent_req *subreq)
2891 /* we do not care about the result */
2892 TALLOC_FREE(subreq);
/*
 * Create (but do not submit) an SMB2 request.
 *
 * Fills in the SMB2 header (protocol id, opcode, flags, tid, session id,
 * channel sequence), decides per session/tcon/flags whether the request
 * must be signed or encrypted, applies the SMB 3.1.1 io_priority flag
 * bits, stores the fixed/dyn body pointers in the request state, and
 * optionally arms a timeout.  Submission happens separately via
 * smb2cli_req_compound_submit().
 *
 * Signing/encryption interplay visible below: a session bind (SESSSETUP
 * with no channel signing key yet but an existing session signing key)
 * forces signing; a SESSSETUP without a channel key disables encryption;
 * a request that will be encrypted is not also signed.
 */
2895 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
2896 struct tevent_context *ev,
2897 struct smbXcli_conn *conn,
2899 uint32_t additional_flags,
2900 uint32_t clear_flags,
2901 uint32_t timeout_msec,
2902 struct smbXcli_tcon *tcon,
2903 struct smbXcli_session *session,
2904 const uint8_t *fixed,
2908 uint32_t max_dyn_len)
2910 struct tevent_req *req;
2911 struct smbXcli_req_state *state;
2915 bool use_channel_sequence = conn->smb2.force_channel_sequence;
2916 uint16_t channel_sequence = 0;
2917 bool use_replay_flag = false;
2919 req = tevent_req_create(mem_ctx, &state,
2920 struct smbXcli_req_state);
2927 state->session = session;
2930 if (conn->smb2.server.capabilities & SMB2_CAP_PERSISTENT_HANDLES) {
2931 use_channel_sequence = true;
2932 } else if (conn->smb2.server.capabilities & SMB2_CAP_MULTI_CHANNEL) {
2933 use_channel_sequence = true;
2936 if (smbXcli_conn_protocol(conn) >= PROTOCOL_SMB3_00) {
2937 use_replay_flag = true;
2940 if (smbXcli_conn_protocol(conn) >= PROTOCOL_SMB3_11) {
2941 flags |= SMB2_PRIORITY_VALUE_TO_MASK(conn->smb2.io_priority);
2945 uid = session->smb2->session_id;
2947 if (use_channel_sequence) {
2948 channel_sequence = session->smb2->channel_sequence;
2951 if (use_replay_flag && session->smb2->replay_active) {
2952 additional_flags |= SMB2_HDR_FLAG_REPLAY_OPERATION;
2955 state->smb2.should_sign = session->smb2->should_sign;
2956 state->smb2.should_encrypt = session->smb2->should_encrypt;
2958 if (cmd == SMB2_OP_SESSSETUP &&
2959 session->smb2_channel.signing_key.length == 0 &&
2960 session->smb2->signing_key.length != 0)
2963 * a session bind needs to be signed
2965 state->smb2.should_sign = true;
2968 if (cmd == SMB2_OP_SESSSETUP &&
2969 session->smb2_channel.signing_key.length == 0) {
2970 state->smb2.should_encrypt = false;
2973 if (additional_flags & SMB2_HDR_FLAG_SIGNED) {
2974 if (session->smb2_channel.signing_key.length == 0) {
2975 tevent_req_nterror(req, NT_STATUS_NO_USER_SESSION_KEY);
2979 additional_flags &= ~SMB2_HDR_FLAG_SIGNED;
2980 state->smb2.should_sign = true;
2985 tid = tcon->smb2.tcon_id;
2987 if (tcon->smb2.should_sign) {
2988 state->smb2.should_sign = true;
2990 if (tcon->smb2.should_encrypt) {
2991 state->smb2.should_encrypt = true;
2995 if (state->smb2.should_encrypt) {
2996 state->smb2.should_sign = false; /* encryption implies integrity */
2999 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
3000 if (state->smb2.recv_iov == NULL) {
3005 flags |= additional_flags;
3006 flags &= ~clear_flags;
3008 state->smb2.fixed = fixed;
3009 state->smb2.fixed_len = fixed_len;
3010 state->smb2.dyn = dyn;
3011 state->smb2.dyn_len = dyn_len;
3012 state->smb2.max_dyn_len = max_dyn_len;
3014 if (state->smb2.should_encrypt) {
3015 SIVAL(state->smb2.transform, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
3016 SBVAL(state->smb2.transform, SMB2_TF_SESSION_ID, uid);
3019 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
3020 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
3021 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
3022 SSVAL(state->smb2.hdr, SMB2_HDR_CHANNEL_SEQUENCE, channel_sequence);
3023 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
3024 SIVAL(state->smb2.hdr, SMB2_HDR_PID, 0); /* reserved */
3025 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
3026 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
3029 case SMB2_OP_CANCEL:
3030 state->one_way = true;
3034 * If this is a dummy request, it will have
3035 * UINT64_MAX as message id.
3036 * If we send on break acknowledgement,
3037 * this gets overwritten later.
3039 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
3043 if (timeout_msec > 0) {
3044 struct timeval endtime;
3046 endtime = timeval_current_ofs_msec(timeout_msec);
3047 if (!tevent_req_set_endtime(req, ev, endtime)) {
/*
 * Mark the request so the receive path treats an async interim response
 * as a notification rather than waiting silently.
 */
3055 void smb2cli_req_set_notify_async(struct tevent_req *req)
3057 struct smbXcli_req_state *state =
3058 tevent_req_data(req,
3059 struct smbXcli_req_state);
3061 state->smb2.notify_async = true;
3064 static void smb2cli_req_writev_done(struct tevent_req *subreq);
3065 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
3066 TALLOC_CTX *tmp_mem,
3069 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
3072 struct smbXcli_req_state *state;
3073 struct tevent_req *subreq;
3075 int i, num_iov, nbt_len;
3077 const DATA_BLOB *encryption_key = NULL;
3078 uint64_t encryption_session_id = 0;
3079 uint64_t nonce_high = UINT64_MAX;
3080 uint64_t nonce_low = UINT64_MAX;
3083 * 1 for the nbt length, optional TRANSFORM
3084 * per request: HDR, fixed, dyn, padding
3085 * -1 because the last one does not need padding
3088 iov = talloc_array(reqs[0], struct iovec, 1 + 1 + 4*num_reqs - 1);
3090 return NT_STATUS_NO_MEMORY;
3097 * the session of the first request that requires encryption
3098 * specifies the encryption key.
3100 for (i=0; i<num_reqs; i++) {
3101 if (!tevent_req_is_in_progress(reqs[i])) {
3102 return NT_STATUS_INTERNAL_ERROR;
3105 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
3107 if (!smbXcli_conn_is_connected(state->conn)) {
3108 return NT_STATUS_CONNECTION_DISCONNECTED;
3111 if ((state->conn->protocol != PROTOCOL_NONE) &&
3112 (state->conn->protocol < PROTOCOL_SMB2_02)) {
3113 return NT_STATUS_REVISION_MISMATCH;
3116 if (state->session == NULL) {
3120 if (!state->smb2.should_encrypt) {
3124 encryption_key = &state->session->smb2->encryption_key;
3125 if (encryption_key->length == 0) {
3126 return NT_STATUS_INVALID_PARAMETER_MIX;
3129 encryption_session_id = state->session->smb2->session_id;
3131 state->session->smb2->nonce_low += 1;
3132 if (state->session->smb2->nonce_low == 0) {
3133 state->session->smb2->nonce_high += 1;
3134 state->session->smb2->nonce_low += 1;
3138 * CCM and GCM algorithms must never have their
3139 * nonce wrap, or the security of the whole
3140 * communication and the keys is destroyed.
3141 * We must drop the connection once we have
3142 * transfered too much data.
3144 * NOTE: We assume nonces greater than 8 bytes.
3146 if (state->session->smb2->nonce_high >=
3147 state->session->smb2->nonce_high_max)
3149 return NT_STATUS_ENCRYPTION_FAILED;
3152 nonce_high = state->session->smb2->nonce_high_random;
3153 nonce_high += state->session->smb2->nonce_high;
3154 nonce_low = state->session->smb2->nonce_low;
3157 iov[num_iov].iov_base = state->smb2.transform;
3158 iov[num_iov].iov_len = sizeof(state->smb2.transform);
3161 SBVAL(state->smb2.transform, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
3162 SBVAL(state->smb2.transform, SMB2_TF_NONCE,
3164 SBVAL(state->smb2.transform, SMB2_TF_NONCE+8,
3166 SBVAL(state->smb2.transform, SMB2_TF_SESSION_ID,
3167 encryption_session_id);
3169 nbt_len += SMB2_TF_HDR_SIZE;
3173 for (i=0; i<num_reqs; i++) {
3182 const DATA_BLOB *signing_key = NULL;
3184 if (!tevent_req_is_in_progress(reqs[i])) {
3185 return NT_STATUS_INTERNAL_ERROR;
3188 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
3190 if (!smbXcli_conn_is_connected(state->conn)) {
3191 return NT_STATUS_CONNECTION_DISCONNECTED;
3194 if ((state->conn->protocol != PROTOCOL_NONE) &&
3195 (state->conn->protocol < PROTOCOL_SMB2_02)) {
3196 return NT_STATUS_REVISION_MISMATCH;
3199 opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
3200 if (opcode == SMB2_OP_CANCEL) {
3204 avail = UINT64_MAX - state->conn->smb2.mid;
3206 return NT_STATUS_CONNECTION_ABORTED;
3209 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
3210 uint32_t max_dyn_len = 1;
3212 max_dyn_len = MAX(max_dyn_len, state->smb2.dyn_len);
3213 max_dyn_len = MAX(max_dyn_len, state->smb2.max_dyn_len);
3215 charge = (max_dyn_len - 1)/ 65536 + 1;
3220 charge = MAX(state->smb2.credit_charge, charge);
3222 avail = MIN(avail, state->conn->smb2.cur_credits);
3223 if (avail < charge) {
3224 return NT_STATUS_INTERNAL_ERROR;
3228 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
3229 credits = state->conn->smb2.max_credits -
3230 state->conn->smb2.cur_credits;
3232 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
3236 mid = state->conn->smb2.mid;
3237 state->conn->smb2.mid += charge;
3238 state->conn->smb2.cur_credits -= charge;
3240 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
3241 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
3243 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
3244 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
3246 state->smb2.cancel_flags = 0;
3247 state->smb2.cancel_mid = mid;
3248 state->smb2.cancel_aid = 0;
3251 if (state->session && encryption_key == NULL) {
3253 * We prefer the channel signing key if it is
3256 if (state->smb2.should_sign) {
3257 signing_key = &state->session->smb2_channel.signing_key;
3261 * If it is a channel binding, we already have the main
3262 * signing key and try that one.
3264 if (signing_key && signing_key->length == 0) {
3265 signing_key = &state->session->smb2->signing_key;
3269 * If we do not have any session key yet, we skip the
3270 * signing of SMB2_OP_SESSSETUP requests.
3272 if (signing_key && signing_key->length == 0) {
3278 iov[num_iov].iov_base = state->smb2.hdr;
3279 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
3282 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
3283 iov[num_iov].iov_len = state->smb2.fixed_len;
3286 if (state->smb2.dyn != NULL) {
3287 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
3288 iov[num_iov].iov_len = state->smb2.dyn_len;
3292 reqlen = sizeof(state->smb2.hdr);
3293 reqlen += state->smb2.fixed_len;
3294 reqlen += state->smb2.dyn_len;
3296 if (i < num_reqs-1) {
3297 if ((reqlen % 8) > 0) {
3298 uint8_t pad = 8 - (reqlen % 8);
3299 iov[num_iov].iov_base = state->smb2.pad;
3300 iov[num_iov].iov_len = pad;
3304 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
3307 state->smb2.encryption_session_id = encryption_session_id;
3309 if (signing_key != NULL) {
3312 status = smb2_signing_sign_pdu(*signing_key,
3313 state->session->conn->protocol,
3314 &iov[hdr_iov], num_iov - hdr_iov);
3315 if (!NT_STATUS_IS_OK(status)) {
3322 ret = smbXcli_req_set_pending(reqs[i]);
3324 return NT_STATUS_NO_MEMORY;
3328 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
3329 _smb_setlen_tcp(state->length_hdr, nbt_len);
3330 iov[0].iov_base = state->length_hdr;
3331 iov[0].iov_len = sizeof(state->length_hdr);
3333 if (encryption_key != NULL) {
3335 size_t buflen = nbt_len - SMB2_TF_HDR_SIZE;
3339 buf = talloc_array(iov, uint8_t, buflen);
3341 return NT_STATUS_NO_MEMORY;
3345 * We copy the buffers before encrypting them,
3346 * this is at least currently needed for the
3347 * to keep state->smb2.hdr.
3349 * Also the callers may expect their buffers
3352 for (vi = tf_iov + 1; vi < num_iov; vi++) {
3353 struct iovec *v = &iov[vi];
3354 const uint8_t *o = (const uint8_t *)v->iov_base;
3356 memcpy(buf, o, v->iov_len);
3357 v->iov_base = (void *)buf;