2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "lib/util/dlinklist.h"
28 #include "../libcli/smb/smb_common.h"
29 #include "../libcli/smb/smb_seal.h"
30 #include "../libcli/smb/smb_signing.h"
31 #include "../libcli/smb/read_smb.h"
32 #include "smbXcli_base.h"
33 #include "librpc/ndr/libndr.h"
/* Forward declaration; sessions are kept in a doubly-linked list on the conn. */
37 struct smbXcli_session;
/*
 * NOTE(review): the following members belong to struct smbXcli_conn,
 * whose opening line is not visible in this excerpt.
 */
/* Local and remote socket addresses, filled via getsockname()/getpeername(). */
41 struct sockaddr_storage local_ss;
42 struct sockaddr_storage remote_ss;
/* talloc'ed copy of the server name passed to smbXcli_conn_create(). */
43 const char *remote_name;
/* Serializes outgoing writev requests on the socket. */
45 struct tevent_queue *outgoing;
/* talloc array of requests awaiting a response; length tracked via talloc_array_length(). */
46 struct tevent_req **pending;
/* The single outstanding read_smb request; child of conn->pending. */
47 struct tevent_req *read_smb_req;
/* Negotiated dialect; PROTOCOL_NONE until negotiation completes. */
49 enum protocol_types protocol;
52 bool mandatory_signing;
55 * The incoming dispatch function should return:
56 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
57 * - NT_STATUS_OK, if no more processing is desired, e.g.
58 * the dispatch function called
60 * - All other return values disconnect the connection.
62 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
/* NOTE(review): the capability/security_mode fields below appear to live in
 * nested client/server/negotiated sub-structs for SMB1 and SMB2; the
 * surrounding struct framing is elided here — confirm against full source. */
68 uint32_t capabilities;
73 uint32_t capabilities;
76 uint16_t security_mode;
85 const char *workgroup;
91 uint32_t capabilities;
/* SMB1 signing engine state and optional transport encryption state. */
96 struct smb_signing_state *signing;
97 struct smb_trans_enc_state *trans_enc;
102 uint16_t security_mode;
107 uint32_t capabilities;
108 uint16_t security_mode;
/* Negotiated SMB2 transfer size limits. */
110 uint32_t max_trans_size;
111 uint32_t max_read_size;
112 uint32_t max_write_size;
/* SMB2 crediting: currently granted credits and the allowed maximum. */
119 uint16_t cur_credits;
120 uint16_t max_credits;
/* Head of the DLIST of sessions bound to this connection. */
123 struct smbXcli_session *sessions;
/* A session bound to one connection; unlinked in the conn destructor. */
126 struct smbXcli_session {
127 struct smbXcli_session *prev, *next;
128 struct smbXcli_conn *conn;
132 uint16_t session_flags;
133 DATA_BLOB signing_key;
134 DATA_BLOB session_key;
/* Per-request state attached to a tevent_req for both SMB1 and SMB2. */
140 struct smbXcli_req_state {
141 struct tevent_context *ev;
142 struct smbXcli_conn *conn;
143 struct smbXcli_session *session; /* maybe NULL */
/* 4-byte NBT length header that precedes every PDU on the wire. */
145 uint8_t length_hdr[4];
152 /* Space for the header including the wct */
153 uint8_t hdr[HDR_VWV];
156 * For normal requests, smb1cli_req_send chooses a mid.
157 * SecondaryV trans requests need to use the mid of the primary
158 * request, so we need a place to store it.
159 * Assume it is set if != 0.
/* Wire-format byte count (BCC), filled from smbXcli_iov_len() of the bytes. */
164 uint8_t bytecount_buf[2];
166 #define MAX_SMB_IOV 5
167 /* length_hdr, hdr, words, byte_count, buffers */
168 struct iovec iov[1 + 3 + MAX_SMB_IOV];
/* Non-NULL on the primary request of an AndX chain; talloc array of reqs. */
172 struct tevent_req **chained_requests;
175 NTSTATUS recv_status;
176 /* always an array of 3 talloc elements */
177 struct iovec *recv_iov;
/* SMB2 fixed-body pointer (see smb2cli_req_* senders, elided here). */
181 const uint8_t *fixed;
187 uint8_t pad[7]; /* padding space for compounding */
189 /* always an array of 3 talloc elements */
190 struct iovec *recv_iov;
192 uint16_t credit_charge;
194 bool signing_skipped;
/*
 * talloc destructor for smbXcli_conn: tears down the socket without
 * notifying callers, detaches all sessions, and frees any SMB1
 * transport-encryption state.
 */
198 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
201 * NT_STATUS_OK, means we do not notify the callers
203 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
/* Detach every session from this dying connection; sessions outlive it. */
205 while (conn->sessions) {
206 conn->sessions->conn = NULL;
207 DLIST_REMOVE(conn->sessions, conn->sessions);
/* Free the SMB1 encryption context, if one was set via smb1cli_conn_set_encryption(). */
210 if (conn->smb1.trans_enc) {
211 common_free_encryption_state(&conn->smb1.trans_enc);
/*
 * Create a client connection object around an already-connected socket fd.
 *
 * @param mem_ctx        talloc parent for the new connection.
 * @param remote_name    server name, duplicated onto the connection.
 * @param signing_state  caller's signing policy (off/if-required/required).
 * @param smb1_capabilities  client CAP_* bits to offer in SMB1 negotiation.
 * @param client_guid    SMB2 client GUID, copied by value.
 * @return the new connection, or (presumably) NULL on failure — the
 *         error-path lines are elided in this excerpt.
 */
217 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
219 const char *remote_name,
220 enum smb_signing_setting signing_state,
221 uint32_t smb1_capabilities,
222 struct GUID *client_guid)
224 struct smbXcli_conn *conn = NULL;
226 struct sockaddr *sa = NULL;
230 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
235 conn->remote_name = talloc_strdup(conn, remote_name);
236 if (conn->remote_name == NULL) {
/* Record both endpoint addresses of the already-connected fd. */
242 ss = (void *)&conn->local_ss;
243 sa = (struct sockaddr *)ss;
244 sa_length = sizeof(conn->local_ss);
245 ret = getsockname(fd, sa, &sa_length);
249 ss = (void *)&conn->remote_ss;
250 sa = (struct sockaddr *)ss;
251 sa_length = sizeof(conn->remote_ss);
252 ret = getpeername(fd, sa, &sa_length);
/* Queue that serializes all outgoing writes on this socket. */
257 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
258 if (conn->outgoing == NULL) {
261 conn->pending = NULL;
263 conn->protocol = PROTOCOL_NONE;
/* Map the caller's signing policy onto the three allow/desire/mandatory flags. */
265 switch (signing_state) {
266 case SMB_SIGNING_OFF:
268 conn->allow_signing = false;
269 conn->desire_signing = false;
270 conn->mandatory_signing = false;
272 case SMB_SIGNING_DEFAULT:
273 case SMB_SIGNING_IF_REQUIRED:
274 /* if the server requires it */
275 conn->allow_signing = true;
276 conn->desire_signing = false;
277 conn->mandatory_signing = false;
279 case SMB_SIGNING_REQUIRED:
281 conn->allow_signing = true;
282 conn->desire_signing = true;
283 conn->mandatory_signing = true;
287 conn->smb1.client.capabilities = smb1_capabilities;
288 conn->smb1.client.max_xmit = UINT16_MAX;
/* Pre-negotiation defaults; updated once the server responds. */
290 conn->smb1.capabilities = conn->smb1.client.capabilities;
291 conn->smb1.max_xmit = 1024;
295 /* initialise signing */
296 conn->smb1.signing = smb_signing_init(conn,
298 conn->desire_signing,
299 conn->mandatory_signing);
300 if (!conn->smb1.signing) {
304 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
305 if (conn->mandatory_signing) {
306 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
309 conn->smb2.client.guid = *client_guid;
/* SMB2 starts with a single credit until the server grants more. */
312 conn->smb2.cur_credits = 1;
313 conn->smb2.max_credits = 0;
315 talloc_set_destructor(conn, smbXcli_conn_destructor);
/* True while the socket fd is valid; disconnect sets it to -1 (elided here). */
323 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
329 if (conn->fd == -1) {
/* Negotiated dialect accessor. */
336 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
338 return conn->protocol;
/* SMB2+ is always unicode; SMB1 only if CAP_UNICODE was negotiated. */
341 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
343 if (conn->protocol >= PROTOCOL_SMB2_02) {
347 if (conn->smb1.capabilities & CAP_UNICODE) {
/* Apply "socket options"-style settings to the underlying fd. */
354 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
356 set_socket_options(conn->fd, options);
/* Endpoint address accessors (filled in smbXcli_conn_create). */
359 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
361 return &conn->local_ss;
364 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
366 return &conn->remote_ss;
369 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
371 return conn->remote_name;
/* Max simultaneous requests: SMB1 uses the server's max_mux; the SMB2
 * branch's return value is elided in this excerpt. */
374 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
376 if (conn->protocol >= PROTOCOL_SMB2_02) {
383 return conn->smb1.server.max_mux;
/* Server clock as reported during negotiation, per protocol generation. */
386 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
388 if (conn->protocol >= PROTOCOL_SMB2_02) {
389 return conn->smb2.server.system_time;
392 return conn->smb1.server.system_time;
/* Initial GSSAPI/SPNEGO blob the server offered in negotiation. */
395 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
397 if (conn->protocol >= PROTOCOL_SMB2_02) {
398 return &conn->smb2.server.gss_blob;
401 return &conn->smb1.server.gss_blob;
404 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
406 if (conn->protocol >= PROTOCOL_SMB2_02) {
407 return &conn->smb2.server.guid;
410 return &conn->smb1.server.guid;
/* SMB1-only negotiated-state accessors. */
413 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
415 return conn->smb1.capabilities;
418 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
420 return conn->smb1.max_xmit;
423 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
425 return conn->smb1.server.session_key;
428 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
430 return conn->smb1.server.challenge;
433 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
435 return conn->smb1.server.security_mode;
438 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
440 return conn->smb1.server.time_zone;
/* Turn on SMB1 signing once the session key is known (post-authentication). */
443 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
444 const DATA_BLOB user_session_key,
445 const DATA_BLOB response)
447 return smb_signing_activate(conn->smb1.signing,
/* Verify the signature of a received SMB1 PDU against its sequence number. */
452 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
453 const uint8_t *buf, uint32_t seqnum)
455 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
458 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
460 return smb_signing_is_active(conn->smb1.signing);
/* Install (and take ownership of) an SMB1 transport-encryption context. */
463 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
464 struct smb_trans_enc_state *es)
466 /* Replace the old state, if any. */
467 if (conn->smb1.trans_enc) {
468 common_free_encryption_state(&conn->smb1.trans_enc);
470 conn->smb1.trans_enc = es;
473 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
475 return common_encryption_on(conn->smb1.trans_enc);
/* Decode the raw status field of an SMB1 header: NT status if
 * FLAGS2_32_BIT_ERROR_CODES is set, otherwise a DOS class/code pair. */
479 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
481 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
482 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
484 if (NT_STATUS_IS_OK(status)) {
488 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
492 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
496 * Is the SMB command able to hold an AND_X successor
497 * @param[in] cmd The SMB command in question
498 * @retval Can we add a chained request after "cmd"?
500 bool smb1cli_is_andx_req(uint8_t cmd)
/*
 * Pick a multiplex id that is neither 0, 0xffff, nor in use by any
 * pending request. The retry/wrap control flow is partly elided here.
 */
520 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
522 size_t num_pending = talloc_array_length(conn->pending);
528 result = conn->smb1.mid++;
/* 0 and 0xffff are reserved (0xffff is used for oplock breaks). */
529 if ((result == 0) || (result == 0xffff)) {
533 for (i=0; i<num_pending; i++) {
534 if (result == smb1cli_req_mid(conn->pending[i])) {
/* No collision with any pending request: candidate is usable. */
539 if (i == num_pending) {
/*
 * Remove a request from conn->pending. Frees the socket-read request
 * when nothing remains pending, and shrinks the pending array in place.
 */
545 void smbXcli_req_unset_pending(struct tevent_req *req)
547 struct smbXcli_req_state *state =
549 struct smbXcli_req_state);
550 struct smbXcli_conn *conn = state->conn;
551 size_t num_pending = talloc_array_length(conn->pending);
554 if (state->smb1.mid != 0) {
556 * This is a [nt]trans[2] request which waits
557 * for more than one reply.
/* Drop our destructor; we are unlinking explicitly now. */
562 talloc_set_destructor(req, NULL);
564 if (num_pending == 1) {
566 * The pending read_smb tevent_req is a child of
567 * conn->pending. So if nothing is pending anymore, we need to
568 * delete the socket read fde.
570 TALLOC_FREE(conn->pending);
571 conn->read_smb_req = NULL;
575 for (i=0; i<num_pending; i++) {
576 if (req == conn->pending[i]) {
580 if (i == num_pending) {
582 * Something's seriously broken. Just returning here is the
583 * right thing nevertheless, the point of this routine is to
584 * remove ourselves from conn->pending.
590 * Remove ourselves from the conn->pending array
592 for (; i < (num_pending - 1); i++) {
593 conn->pending[i] = conn->pending[i+1];
597 * No NULL check here, we're shrinking by sizeof(void *), and
598 * talloc_realloc just adjusts the size for this.
600 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
/* Request destructor: guarantees removal from conn->pending on free. */
605 static int smbXcli_req_destructor(struct tevent_req *req)
607 struct smbXcli_req_state *state =
609 struct smbXcli_req_state);
612 * Make sure we really remove it from
613 * the pending array on destruction.
616 smbXcli_req_unset_pending(req);
620 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
/*
 * Append a request to conn->pending and make sure a socket read is
 * outstanding. On failure the request is unlinked again and the whole
 * connection is torn down.
 */
622 bool smbXcli_req_set_pending(struct tevent_req *req)
624 struct smbXcli_req_state *state =
626 struct smbXcli_req_state);
627 struct smbXcli_conn *conn;
628 struct tevent_req **pending;
633 if (!smbXcli_conn_is_connected(conn)) {
637 num_pending = talloc_array_length(conn->pending);
/* Grow the pending array by one slot. */
639 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
641 if (pending == NULL) {
644 pending[num_pending] = req;
645 conn->pending = pending;
646 talloc_set_destructor(req, smbXcli_req_destructor);
648 if (!smbXcli_conn_receive_next(conn)) {
650 * the caller should notify the current request
652 * And all other pending requests get notified
653 * by smbXcli_conn_disconnect().
655 smbXcli_req_unset_pending(req);
656 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
663 static void smbXcli_conn_received(struct tevent_req *subreq);
/*
 * Ensure exactly one read_smb request is outstanding on the socket.
 * No-op if a read is already pending or nothing is waiting for a reply
 * (unless the SMB2 message-id space is exhausted, which forces a disconnect).
 */
665 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
667 size_t num_pending = talloc_array_length(conn->pending);
668 struct tevent_req *req;
669 struct smbXcli_req_state *state;
671 if (conn->read_smb_req != NULL) {
675 if (num_pending == 0) {
676 if (conn->smb2.mid < UINT64_MAX) {
677 /* no more pending requests, so we are done for now */
682 * If there are no more SMB2 requests possible,
683 * because we are out of message ids,
684 * we need to disconnect.
686 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
690 req = conn->pending[0];
691 state = tevent_req_data(req, struct smbXcli_req_state);
694 * We're the first ones, add the read_smb request that waits for the
695 * answer from the server
697 conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
698 if (conn->read_smb_req == NULL) {
701 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
/*
 * Close the socket and fail every pending request (and every request
 * chained behind one) with the given status. NT_STATUS_OK means
 * "silent" teardown: requests are unlinked but callers not notified.
 */
705 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
707 if (conn->fd != -1) {
713 * Cancel all pending requests. We do not do a for-loop walking
714 * conn->pending because that array changes in
715 * smbXcli_req_unset_pending.
717 while (talloc_array_length(conn->pending) > 0) {
718 struct tevent_req *req;
719 struct smbXcli_req_state *state;
720 struct tevent_req **chain;
724 req = conn->pending[0];
725 state = tevent_req_data(req, struct smbXcli_req_state);
727 if (state->smb1.chained_requests == NULL) {
729 * We're dead. No point waiting for trans2
734 smbXcli_req_unset_pending(req);
736 if (NT_STATUS_IS_OK(status)) {
737 /* do not notify the callers */
742 * we need to defer the callback, because we may notify
743 * more then one caller.
745 tevent_req_defer_callback(req, state->ev);
746 tevent_req_nterror(req, status);
/* AndX chain: take ownership of the chain and fail each member. */
750 chain = talloc_move(conn, &state->smb1.chained_requests);
751 num_chained = talloc_array_length(chain);
753 for (i=0; i<num_chained; i++) {
755 state = tevent_req_data(req, struct smbXcli_req_state);
758 * We're dead. No point waiting for trans2
763 smbXcli_req_unset_pending(req);
765 if (NT_STATUS_IS_OK(status)) {
766 /* do not notify the callers */
771 * we need to defer the callback, because we may notify
772 * more then one caller.
774 tevent_req_defer_callback(req, state->ev);
775 tevent_req_nterror(req, status);
782 * Fetch a smb request's mid. Only valid after the request has been sent by
783 * smb1cli_req_send().
785 uint16_t smb1cli_req_mid(struct tevent_req *req)
787 struct smbXcli_req_state *state =
789 struct smbXcli_req_state);
/* An explicitly-set mid (secondary trans requests) takes precedence. */
791 if (state->smb1.mid != 0) {
792 return state->smb1.mid;
/* Otherwise read the mid that was stamped into the wire header. */
795 return SVAL(state->smb1.hdr, HDR_MID);
/* Force a specific mid (used by secondary [nt]trans[2] requests). */
798 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
800 struct smbXcli_req_state *state =
802 struct smbXcli_req_state);
804 state->smb1.mid = mid;
/* Signing sequence number get/set for this request. */
807 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
809 struct smbXcli_req_state *state =
811 struct smbXcli_req_state);
813 return state->smb1.seqnum;
816 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
818 struct smbXcli_req_state *state =
820 struct smbXcli_req_state);
822 state->smb1.seqnum = seqnum;
/* Sum of the lengths of `count` iovec entries. */
825 static size_t smbXcli_iov_len(const struct iovec *iov, int count)
829 for (i=0; i<count; i++) {
830 result += iov[i].iov_len;
/* Flatten an iovec array into one newly talloc'ed contiguous buffer
 * (NULL return on allocation failure is in the elided lines). */
835 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
836 const struct iovec *iov,
839 size_t len = smbXcli_iov_len(iov, count);
844 buf = talloc_array(mem_ctx, uint8_t, len);
849 for (i=0; i<count; i++) {
850 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
851 copied += iov[i].iov_len;
/*
 * Compute the SMB1 header FLAGS/FLAGS2 for a request: protocol- and
 * capability-derived defaults, then the caller's additional/clear masks
 * applied on top (results returned via out-params, elided here).
 */
856 static void smb1cli_req_flags(enum protocol_types protocol,
857 uint32_t smb1_capabilities,
859 uint8_t additional_flags,
862 uint16_t additional_flags2,
863 uint16_t clear_flags2,
869 if (protocol >= PROTOCOL_LANMAN1) {
870 flags |= FLAG_CASELESS_PATHNAMES;
871 flags |= FLAG_CANONICAL_PATHNAMES;
874 if (protocol >= PROTOCOL_LANMAN2) {
875 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
876 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
879 if (protocol >= PROTOCOL_NT1) {
880 flags2 |= FLAGS2_IS_LONG_NAME;
882 if (smb1_capabilities & CAP_UNICODE) {
883 flags2 |= FLAGS2_UNICODE_STRINGS;
885 if (smb1_capabilities & CAP_STATUS32) {
886 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
888 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
889 flags2 |= FLAGS2_EXTENDED_SECURITY;
/* Caller overrides: set additional bits, then clear requested bits. */
893 flags |= additional_flags;
894 flags &= ~clear_flags;
895 flags2 |= additional_flags2;
896 flags2 &= ~clear_flags2;
/*
 * Build (but do not send) an SMB1 request: fills the wire header,
 * assembles the iovec [NBT length | header | vwv | bcc | bytes...],
 * and arms the optional timeout. The mid is stamped later, at submit
 * time. Returns the tevent_req, or NULL/posted-error per tevent rules
 * (error paths are elided in this excerpt).
 */
902 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
903 struct tevent_context *ev,
904 struct smbXcli_conn *conn,
906 uint8_t additional_flags,
908 uint16_t additional_flags2,
909 uint16_t clear_flags2,
910 uint32_t timeout_msec,
914 uint8_t wct, uint16_t *vwv,
916 struct iovec *bytes_iov)
918 struct tevent_req *req;
919 struct smbXcli_req_state *state;
/* The fixed iov array only has room for MAX_SMB_IOV byte buffers. */
923 if (iov_count > MAX_SMB_IOV) {
925 * Should not happen :-)
930 req = tevent_req_create(mem_ctx, &state,
931 struct smbXcli_req_state);
/* Defaults until a reply arrives. */
938 state->smb1.recv_cmd = 0xFF;
939 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
940 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
941 if (state->smb1.recv_iov == NULL) {
946 smb1cli_req_flags(conn->protocol,
947 conn->smb1.capabilities,
/* Stamp the SMB1 wire header. */
956 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
957 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
958 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
959 SCVAL(state->smb1.hdr, HDR_FLG, flags);
960 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
961 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
962 SSVAL(state->smb1.hdr, HDR_TID, tid);
963 SSVAL(state->smb1.hdr, HDR_PID, pid);
964 SSVAL(state->smb1.hdr, HDR_UID, uid);
965 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
966 SSVAL(state->smb1.hdr, HDR_WCT, wct);
968 state->smb1.vwv = vwv;
/* BCC = total size of all caller-supplied byte buffers. */
970 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
972 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
973 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
974 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
975 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
976 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
977 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
978 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
979 state->smb1.iov[3].iov_len = sizeof(uint16_t);
981 if (iov_count != 0) {
982 memcpy(&state->smb1.iov[4], bytes_iov,
983 iov_count * sizeof(*bytes_iov));
985 state->smb1.iov_count = iov_count + 4;
987 if (timeout_msec > 0) {
988 struct timeval endtime;
990 endtime = timeval_current_ofs_msec(timeout_msec);
991 if (!tevent_req_set_endtime(req, ev, endtime)) {
/* Some commands never get a reply; mark those as one-way so the
 * sender completes them right after the write. */
996 switch (smb_command) {
1001 state->one_way = true;
1005 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
1006 state->one_way = true;
/*
 * Sign an outgoing SMB1 PDU given as an iovec: validate the iovec
 * layout, flatten it, let the signing engine stamp the MAC into the
 * header copy, then copy the signed header back into iov[1].
 * Returns the sequence number used via *seqnum.
 */
1014 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
1015 struct iovec *iov, int iov_count,
1021 * Obvious optimization: Make cli_calculate_sign_mac work with struct
1022 * iovec directly. MD5Update would do that just fine.
/* Sanity-check the canonical [nbt | hdr | vwv | bcc | ...] layout. */
1025 if (iov_count < 4) {
1026 return NT_STATUS_INVALID_PARAMETER_MIX;
1028 if (iov[0].iov_len != NBT_HDR_SIZE) {
1029 return NT_STATUS_INVALID_PARAMETER_MIX;
1031 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1032 return NT_STATUS_INVALID_PARAMETER_MIX;
1034 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1035 return NT_STATUS_INVALID_PARAMETER_MIX;
1037 if (iov[3].iov_len != sizeof(uint16_t)) {
1038 return NT_STATUS_INVALID_PARAMETER_MIX;
1041 buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1043 return NT_STATUS_NO_MEMORY;
1046 *seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
1047 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
/* Copy the now-signed header (after the 4-byte NBT length) back. */
1048 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
1051 return NT_STATUS_OK;
1054 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1055 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1056 TALLOC_CTX *tmp_mem,
/*
 * Submit a fully-built SMB1 request to the socket: allocate/stamp the
 * mid, fill the NBT length, sign, optionally encrypt the whole PDU,
 * and queue the writev behind conn->outgoing.
 */
1059 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1060 struct smbXcli_req_state *state,
1061 struct iovec *iov, int iov_count)
1063 struct tevent_req *subreq;
1067 if (!smbXcli_conn_is_connected(state->conn)) {
1068 return NT_STATUS_CONNECTION_DISCONNECTED;
/* SMB1 senders must not be used once an SMB2+ dialect is negotiated. */
1071 if (state->conn->protocol > PROTOCOL_NT1) {
1072 return NT_STATUS_REVISION_MISMATCH;
/* Same layout checks as smb1cli_conn_signv(). */
1075 if (iov_count < 4) {
1076 return NT_STATUS_INVALID_PARAMETER_MIX;
1078 if (iov[0].iov_len != NBT_HDR_SIZE) {
1079 return NT_STATUS_INVALID_PARAMETER_MIX;
1081 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1082 return NT_STATUS_INVALID_PARAMETER_MIX;
1084 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1085 return NT_STATUS_INVALID_PARAMETER_MIX;
1087 if (iov[3].iov_len != sizeof(uint16_t)) {
1088 return NT_STATUS_INVALID_PARAMETER_MIX;
/* A pre-set mid (secondary trans) wins; otherwise allocate a fresh one. */
1091 if (state->smb1.mid != 0) {
1092 mid = state->smb1.mid;
1094 mid = smb1cli_alloc_mid(state->conn);
1096 SSVAL(iov[1].iov_base, HDR_MID, mid);
/* NBT length covers everything after the 4-byte length field itself. */
1098 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
1100 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1101 &state->smb1.seqnum);
1103 if (!NT_STATUS_IS_OK(status)) {
1108 * If we supported multiple encrytion contexts
1109 * here we'd look up based on tid.
1111 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1112 char *buf, *enc_buf;
1114 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1116 return NT_STATUS_NO_MEMORY;
1118 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1119 (char *)buf, &enc_buf);
1121 if (!NT_STATUS_IS_OK(status)) {
1122 DEBUG(0, ("Error in encrypting client message: %s\n",
1123 nt_errstr(status)));
/* Keep a state-owned copy of the encrypted PDU; the single iov
 * now covers the whole encrypted buffer. */
1126 buf = (char *)talloc_memdup(state, enc_buf,
1127 smb_len_nbt(enc_buf)+4);
1130 return NT_STATUS_NO_MEMORY;
1132 iov[0].iov_base = (void *)buf;
1133 iov[0].iov_len = talloc_get_size(buf);
/* First SMB1 send wins the dispatcher slot for this connection. */
1137 if (state->conn->dispatch_incoming == NULL) {
1138 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1141 subreq = writev_send(state, state->ev, state->conn->outgoing,
1142 state->conn->fd, false, iov, iov_count);
1143 if (subreq == NULL) {
1144 return NT_STATUS_NO_MEMORY;
1146 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1147 return NT_STATUS_OK;
/*
 * Convenience sender: wraps the single `bytes` buffer in an iovec,
 * creates the request via smb1cli_req_create() and submits it as a
 * one-element chain. Returns a posted request on early failure.
 */
1150 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1151 struct tevent_context *ev,
1152 struct smbXcli_conn *conn,
1153 uint8_t smb_command,
1154 uint8_t additional_flags,
1155 uint8_t clear_flags,
1156 uint16_t additional_flags2,
1157 uint16_t clear_flags2,
1158 uint32_t timeout_msec,
1162 uint8_t wct, uint16_t *vwv,
1164 const uint8_t *bytes)
1166 struct tevent_req *req;
1170 iov.iov_base = discard_const_p(void, bytes);
1171 iov.iov_len = num_bytes;
1173 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1174 additional_flags, clear_flags,
1175 additional_flags2, clear_flags2,
/* req_create may already have finished the request with an error. */
1182 if (!tevent_req_is_in_progress(req)) {
1183 return tevent_req_post(req, ev);
1185 status = smb1cli_req_chain_submit(&req, 1);
1186 if (tevent_req_nterror(req, status)) {
1187 return tevent_req_post(req, ev);
/*
 * writev completion: on error tear down the connection; for one-way
 * commands complete immediately, otherwise park the request in
 * conn->pending to await the server's reply.
 */
1192 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1194 struct tevent_req *req =
1195 tevent_req_callback_data(subreq,
1197 struct smbXcli_req_state *state =
1198 tevent_req_data(req,
1199 struct smbXcli_req_state);
1203 nwritten = writev_recv(subreq, &err);
1204 TALLOC_FREE(subreq);
1205 if (nwritten == -1) {
/* A failed write poisons the whole connection, not just this request. */
1206 NTSTATUS status = map_nt_error_from_unix_common(err);
1207 smbXcli_conn_disconnect(state->conn, status);
1211 if (state->one_way) {
1212 state->inbuf = NULL;
1213 tevent_req_done(req);
1217 if (!smbXcli_req_set_pending(req)) {
1218 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
/*
 * read_smb completion for the connection: receive one PDU and hand it
 * to the protocol dispatcher. NT_STATUS_RETRY from the dispatcher means
 * "expect more PDUs" and re-arms the socket read; any other error
 * disconnects and fails all pending requests.
 */
1223 static void smbXcli_conn_received(struct tevent_req *subreq)
1225 struct smbXcli_conn *conn =
1226 tevent_req_callback_data(subreq,
1227 struct smbXcli_conn);
1228 TALLOC_CTX *frame = talloc_stackframe();
1234 if (subreq != conn->read_smb_req) {
1235 DEBUG(1, ("Internal error: cli_smb_received called with "
1236 "unexpected subreq\n"));
1237 status = NT_STATUS_INTERNAL_ERROR;
1238 smbXcli_conn_disconnect(conn, status);
1242 conn->read_smb_req = NULL;
1244 received = read_smb_recv(subreq, frame, &inbuf, &err);
1245 TALLOC_FREE(subreq);
1246 if (received == -1) {
1247 status = map_nt_error_from_unix_common(err);
1248 smbXcli_conn_disconnect(conn, status);
1253 status = conn->dispatch_incoming(conn, frame, inbuf);
1255 if (NT_STATUS_IS_OK(status)) {
1257 * We should not do any more processing
1258 * as the dispatch function called
1259 * tevent_req_done().
1262 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1264 * We got an error, so notify all pending requests
1266 smbXcli_conn_disconnect(conn, status);
1271 * We got NT_STATUS_RETRY, so we may ask for a
1272 * next incoming pdu.
1274 if (!smbXcli_conn_receive_next(conn)) {
1275 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
/*
 * Parse a received SMB1 PDU (possibly an AndX chain) into an iovec
 * array: iov[0] is the header up to WCT, then one (vwv, bytes) pair per
 * chained response. Malformed layouts yield
 * NT_STATUS_INVALID_NETWORK_RESPONSE.
 */
1279 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1280 struct iovec **piov, int *pnum_iov)
1291 buflen = smb_len_nbt(buf);
1294 hdr = buf + NBT_HDR_SIZE;
1296 if (buflen < MIN_SMB_SIZE) {
1297 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1301 * This returns iovec elements in the following order:
1316 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1318 return NT_STATUS_NO_MEMORY;
1320 iov[0].iov_base = hdr;
1321 iov[0].iov_len = HDR_WCT;
1324 cmd = CVAL(hdr, HDR_COM);
/* Walk the AndX chain; `taken` tracks consumed bytes within the PDU. */
1328 size_t len = buflen - taken;
1330 struct iovec *iov_tmp;
1337 * we need at least WCT and BCC
1339 needed = sizeof(uint8_t) + sizeof(uint16_t);
1341 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1342 __location__, (int)len, (int)needed));
1347 * Now we check if the specified words are there
1349 wct = CVAL(hdr, wct_ofs);
1350 needed += wct * sizeof(uint16_t);
1352 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1353 __location__, (int)len, (int)needed));
1358 * Now we check if the specified bytes are there
1360 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1361 bcc = SVAL(hdr, bcc_ofs);
1362 needed += bcc * sizeof(uint8_t);
1364 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1365 __location__, (int)len, (int)needed));
1370 * we allocate 2 iovec structures for words and bytes
1372 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1374 if (iov_tmp == NULL) {
1376 return NT_STATUS_NO_MEMORY;
1379 cur = &iov[num_iov];
1382 cur[0].iov_len = wct * sizeof(uint16_t);
1383 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1384 cur[1].iov_len = bcc * sizeof(uint8_t);
1385 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1389 if (!smb1cli_is_andx_req(cmd)) {
1391 * If the current command does not have AndX chanining
1397 if (wct == 0 && bcc == 0) {
1399 * An empty response also ends the chain,
1400 * most likely with an error.
/* AndX replies need at least the andx-cmd and andx-offset words. */
1406 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1407 __location__, (int)wct, (int)cmd));
1410 cmd = CVAL(cur[0].iov_base, 0);
1413 * If it is the end of the chain we are also done.
/* andx_offset of the next reply, relative to the SMB header start. */
1417 wct_ofs = SVAL(cur[0].iov_base, 2);
1419 if (wct_ofs < taken) {
1420 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1422 if (wct_ofs > buflen) {
1423 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1427 * we consumed everything up to the start of the next
1433 remaining = buflen - taken;
1435 if (remaining > 0 && num_iov >= 3) {
1437 * The last DATA block gets the remaining
1438 * bytes, this is needed to support
1439 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1441 iov[num_iov-1].iov_len += remaining;
1445 *pnum_iov = num_iov;
1446 return NT_STATUS_OK;
1450 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Dispatch one received SMB1 PDU: decrypt if needed, match it to a
 * pending request by mid (special-casing mid 0xffff oplock breaks),
 * verify the signature, parse the (possibly chained) reply, and
 * complete the matching request(s). Returns NT_STATUS_RETRY when
 * further PDUs are expected, NT_STATUS_OK when done, or an error to
 * tear down the connection.
 */
1453 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1454 TALLOC_CTX *tmp_mem,
1457 struct tevent_req *req;
1458 struct smbXcli_req_state *state;
1465 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1466 struct iovec *iov = NULL;
1468 struct tevent_req **chain = NULL;
1469 size_t num_chained = 0;
1470 size_t num_responses = 0;
1472 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1473 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1474 DEBUG(10, ("Got non-SMB PDU\n"));
1475 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1479 * If we supported multiple encrytion contexts
1480 * here we'd look up based on tid.
1482 if (common_encryption_on(conn->smb1.trans_enc)
1483 && (CVAL(inbuf, 0) == 0)) {
1484 uint16_t enc_ctx_num;
1486 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1487 if (!NT_STATUS_IS_OK(status)) {
1488 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1489 nt_errstr(status)));
1493 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1494 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1496 conn->smb1.trans_enc->enc_ctx_num));
1497 return NT_STATUS_INVALID_HANDLE;
/* Decrypt in place; inhdr stays valid as it points past the NBT header. */
1500 status = common_decrypt_buffer(conn->smb1.trans_enc,
1502 if (!NT_STATUS_IS_OK(status)) {
1503 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1504 nt_errstr(status)));
/* Find the pending request this reply belongs to. */
1509 mid = SVAL(inhdr, HDR_MID);
1510 num_pending = talloc_array_length(conn->pending);
1512 for (i=0; i<num_pending; i++) {
1513 if (mid == smb1cli_req_mid(conn->pending[i])) {
1517 if (i == num_pending) {
1518 /* Dump unexpected reply */
1519 return NT_STATUS_RETRY;
1522 oplock_break = false;
1524 if (mid == 0xffff) {
1526 * Paranoia checks that this is really an oplock break request.
1528 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1529 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1530 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1531 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1532 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1534 if (!oplock_break) {
1535 /* Dump unexpected reply */
1536 return NT_STATUS_RETRY;
1540 req = conn->pending[i];
1541 state = tevent_req_data(req, struct smbXcli_req_state);
1543 if (!oplock_break /* oplock breaks are not signed */
1544 && !smb_signing_check_pdu(conn->smb1.signing,
1545 inbuf, state->smb1.seqnum+1)) {
1546 DEBUG(10, ("cli_check_sign_mac failed\n"));
1547 return NT_STATUS_ACCESS_DENIED;
1550 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1552 if (!NT_STATUS_IS_OK(status)) {
1553 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1554 nt_errstr(status)));
1558 cmd = CVAL(inhdr, HDR_COM);
1559 status = smb1cli_pull_raw_error(inhdr);
/* Un-chained request: hand it the reply directly. */
1561 if (state->smb1.chained_requests == NULL) {
1563 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1566 smbXcli_req_unset_pending(req);
1568 state->smb1.recv_cmd = cmd;
1569 state->smb1.recv_status = status;
/* Transfer buffer ownership to the request's recv_iov context. */
1570 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1572 state->smb1.recv_iov[0] = iov[0];
1573 state->smb1.recv_iov[1] = iov[1];
1574 state->smb1.recv_iov[2] = iov[2];
1576 if (talloc_array_length(conn->pending) == 0) {
1577 tevent_req_done(req);
1578 return NT_STATUS_OK;
/* More requests pending: defer the callback and keep reading. */
1581 tevent_req_defer_callback(req, state->ev);
1582 tevent_req_done(req);
1583 return NT_STATUS_RETRY;
/* AndX chain: distribute the parsed (vwv, bytes) pairs over the
 * chained requests; extras are aborted, the last gets the status. */
1586 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1587 num_chained = talloc_array_length(chain);
1588 num_responses = (num_iov - 1)/2;
1590 if (num_responses > num_chained) {
1591 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1594 for (i=0; i<num_chained; i++) {
1595 size_t iov_idx = 1 + (i*2);
1596 struct iovec *cur = &iov[iov_idx];
1600 state = tevent_req_data(req, struct smbXcli_req_state);
1602 smbXcli_req_unset_pending(req);
1605 * as we finish multiple requests here
1606 * we need to defer the callbacks as
1607 * they could destroy our current stack state.
1609 tevent_req_defer_callback(req, state->ev);
1611 if (i >= num_responses) {
1612 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1616 state->smb1.recv_cmd = cmd;
1618 if (i == (num_responses - 1)) {
1620 * The last request in the chain gets the status
1622 state->smb1.recv_status = status;
1624 cmd = CVAL(cur[0].iov_base, 0);
1625 state->smb1.recv_status = NT_STATUS_OK;
1628 state->inbuf = inbuf;
1631 * Note: here we use talloc_reference() in a way
1632 * that does not expose it to the caller.
1634 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1635 if (tevent_req_nomem(inbuf_ref, req)) {
1639 /* copy the related buffers */
1640 state->smb1.recv_iov[0] = iov[0];
1641 state->smb1.recv_iov[1] = cur[0];
1642 state->smb1.recv_iov[2] = cur[1];
1644 tevent_req_done(req);
1647 return NT_STATUS_RETRY;
1650 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1651 TALLOC_CTX *mem_ctx,
1652 struct iovec **piov,
1656 uint32_t *pvwv_offset,
1657 uint32_t *pnum_bytes,
1659 uint32_t *pbytes_offset,
1661 const struct smb1cli_req_expected_response *expected,
1662 size_t num_expected)
1664 struct smbXcli_req_state *state =
1665 tevent_req_data(req,
1666 struct smbXcli_req_state);
1667 NTSTATUS status = NT_STATUS_OK;
1668 struct iovec *recv_iov = NULL;
1669 uint8_t *hdr = NULL;
1671 uint32_t vwv_offset = 0;
1672 uint16_t *vwv = NULL;
1673 uint32_t num_bytes = 0;
1674 uint32_t bytes_offset = 0;
1675 uint8_t *bytes = NULL;
1677 bool found_status = false;
1678 bool found_size = false;
1692 if (pvwv_offset != NULL) {
1695 if (pnum_bytes != NULL) {
1698 if (pbytes != NULL) {
1701 if (pbytes_offset != NULL) {
1704 if (pinbuf != NULL) {
1708 if (state->inbuf != NULL) {
1709 recv_iov = state->smb1.recv_iov;
1710 hdr = (uint8_t *)recv_iov[0].iov_base;
1711 wct = recv_iov[1].iov_len/2;
1712 vwv = (uint16_t *)recv_iov[1].iov_base;
1713 vwv_offset = PTR_DIFF(vwv, hdr);
1714 num_bytes = recv_iov[2].iov_len;
1715 bytes = (uint8_t *)recv_iov[2].iov_base;
1716 bytes_offset = PTR_DIFF(bytes, hdr);
1719 if (tevent_req_is_nterror(req, &status)) {
1720 for (i=0; i < num_expected; i++) {
1721 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1722 found_status = true;
1728 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
1734 if (num_expected == 0) {
1735 found_status = true;
1739 status = state->smb1.recv_status;
1741 for (i=0; i < num_expected; i++) {
1742 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1746 found_status = true;
1747 if (expected[i].wct == 0) {
1752 if (expected[i].wct == wct) {
1758 if (!found_status) {
1763 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1767 *piov = talloc_move(mem_ctx, &recv_iov);
1779 if (pvwv_offset != NULL) {
1780 *pvwv_offset = vwv_offset;
1782 if (pnum_bytes != NULL) {
1783 *pnum_bytes = num_bytes;
1785 if (pbytes != NULL) {
1788 if (pbytes_offset != NULL) {
1789 *pbytes_offset = bytes_offset;
1791 if (pinbuf != NULL) {
1792 *pinbuf = state->inbuf;
/*
 * smb1cli_req_wct_ofs(): compute the offset of the wct field of the last
 * request in an SMB1 AndX chain, by accumulating the word/byte iovec
 * lengths of every request and rounding each step up to a 4-byte boundary.
 * NOTE(review): extraction listing; interior lines (initialization,
 * braces, return) are elided. Code kept byte-identical.
 */
1798 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1805 for (i=0; i<num_reqs; i++) {
1806 struct smbXcli_req_state *state;
1807 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* iov[0]/iov[1] are the NBT+SMB headers; only sum words and bytes. */
1808 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1809 state->smb1.iov_count-2);
1810 wct_ofs = (wct_ofs + 3) & ~3; /* round up to 4-byte alignment */
/*
 * smb1cli_req_chain_submit(): glue num_reqs SMB1 requests together into a
 * single AndX chain and submit them in one writev. Each request's AndX
 * vwv gets the next command and its wct offset patched in; the last
 * request terminates the chain with 0xff/0xff. Ownership of the chained
 * request list is recorded on the first request's state.
 *
 * NOTE(review): extraction listing with interior lines elided; code kept
 * byte-identical.
 */
1815 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1817 struct smbXcli_req_state *first_state =
1818 tevent_req_data(reqs[0],
1819 struct smbXcli_req_state);
1820 struct smbXcli_req_state *state;
1822 size_t chain_padding = 0;
1824 struct iovec *iov = NULL;
1825 struct iovec *this_iov;
/* A single request needs no chaining, submit it directly. */
1829 if (num_reqs == 1) {
1830 return smb1cli_req_writev_submit(reqs[0], first_state,
1831 first_state->smb1.iov,
1832 first_state->smb1.iov_count);
/* First pass: validate each request and size the combined iovec array. */
1836 for (i=0; i<num_reqs; i++) {
1837 if (!tevent_req_is_in_progress(reqs[i])) {
1838 return NT_STATUS_INTERNAL_ERROR;
1841 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1843 if (state->smb1.iov_count < 4) {
1844 return NT_STATUS_INVALID_PARAMETER_MIX;
1849 * The NBT and SMB header
1862 iovlen += state->smb1.iov_count - 2;
1865 iov = talloc_zero_array(first_state, struct iovec, iovlen);
1867 return NT_STATUS_NO_MEMORY;
/* Remember the full chain so responses can be matched back up. */
1870 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1871 first_state, reqs, sizeof(*reqs) * num_reqs);
1872 if (first_state->smb1.chained_requests == NULL) {
1874 return NT_STATUS_NO_MEMORY;
1877 wct_offset = HDR_WCT;
/* Second pass: patch AndX linkage and build the combined iovec. */
1880 for (i=0; i<num_reqs; i++) {
1881 size_t next_padding = 0;
1884 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* Every request except the last must be a chainable AndX command. */
1886 if (i < num_reqs-1) {
1887 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
1888 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
1890 TALLOC_FREE(first_state->smb1.chained_requests);
1891 return NT_STATUS_INVALID_PARAMETER_MIX;
1895 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
1896 state->smb1.iov_count-2) + 1;
1897 if ((wct_offset % 4) != 0) {
1898 next_padding = 4 - (wct_offset % 4);
1900 wct_offset += next_padding;
1901 vwv = state->smb1.vwv;
1903 if (i < num_reqs-1) {
1904 struct smbXcli_req_state *next_state =
1905 tevent_req_data(reqs[i+1],
1906 struct smbXcli_req_state);
/* andx_cmd = next request's command, andx_off = its wct offset */
1907 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
1909 SSVAL(vwv+1, 0, wct_offset);
1910 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
1911 /* properly end the chain */
1912 SCVAL(vwv+0, 0, 0xff);
1913 SCVAL(vwv+0, 1, 0xff);
1919 * The NBT and SMB header
1921 this_iov[0] = state->smb1.iov[0];
1922 this_iov[1] = state->smb1.iov[1];
1926 * This one is a bit subtle. We have to add
1927 * chain_padding bytes between the requests, and we
1928 * have to also include the wct field of the
1929 * subsequent requests. We use the subsequent header
1930 * for the padding, it contains the wct field in its
1933 this_iov[0].iov_len = chain_padding+1;
1934 this_iov[0].iov_base = (void *)&state->smb1.hdr[
1935 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
1936 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
1941 * copy the words and bytes
1943 memcpy(this_iov, state->smb1.iov+2,
1944 sizeof(struct iovec) * (state->smb1.iov_count-2));
1945 this_iov += state->smb1.iov_count - 2;
1946 chain_padding = next_padding;
/* The whole chain must still fit in the negotiated max_xmit. */
1949 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
1950 if (nbt_len > first_state->conn->smb1.max_xmit) {
1952 TALLOC_FREE(first_state->smb1.chained_requests);
1953 return NT_STATUS_INVALID_PARAMETER_MIX;
1956 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
1957 if (!NT_STATUS_IS_OK(status)) {
1959 TALLOC_FREE(first_state->smb1.chained_requests);
1963 return NT_STATUS_OK;
1966 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1968 return ((tevent_queue_length(conn->outgoing) != 0)
1969 || (talloc_array_length(conn->pending) != 0));
1972 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
1974 return conn->smb2.server.capabilities;
1977 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
1979 return conn->smb2.server.security_mode;
1982 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
1984 return conn->smb2.server.max_trans_size;
1987 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
1989 return conn->smb2.server.max_read_size;
1992 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
1994 return conn->smb2.server.max_write_size;
1997 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
1998 uint16_t max_credits)
2000 conn->smb2.max_credits = max_credits;
/*
 * smb2cli_req_create(): allocate a tevent_req for an SMB2 request and
 * fill in its wire header (protocol id, opcode, flags, pid/tid and the
 * session id taken from the optional session). The request is created
 * but not yet submitted.
 *
 * NOTE(review): extraction listing; interior lines (some parameters,
 * braces, error paths, return) are elided. Code kept byte-identical.
 */
2003 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
2004 struct tevent_context *ev,
2005 struct smbXcli_conn *conn,
2007 uint32_t additional_flags,
2008 uint32_t clear_flags,
2009 uint32_t timeout_msec,
2012 struct smbXcli_session *session,
2013 const uint8_t *fixed,
2018 struct tevent_req *req;
2019 struct smbXcli_req_state *state;
2023 req = tevent_req_create(mem_ctx, &state,
2024 struct smbXcli_req_state);
2031 state->session = session;
2034 uid = session->smb2.session_id;
/* recv_iov[0..2] = header / body / dyn, filled in on dispatch. */
2037 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
2038 if (state->smb2.recv_iov == NULL) {
/* Caller-requested flag adjustments relative to the defaults. */
2043 flags |= additional_flags;
2044 flags &= ~clear_flags;
2046 state->smb2.fixed = fixed;
2047 state->smb2.fixed_len = fixed_len;
2048 state->smb2.dyn = dyn;
2049 state->smb2.dyn_len = dyn_len;
/* Build the 64-byte SMB2 header in state->smb2.hdr. */
2051 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2052 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2053 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
2054 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2055 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2056 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2057 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
/* CANCEL requests get no response matching a submitted mid. */
2060 case SMB2_OP_CANCEL:
2061 state->one_way = true;
2065 * If this is a dummy request, it will have
2066 * UINT64_MAX as message id.
2067 * If we send on break acknowledgement,
2068 * this gets overwritten later.
2070 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
/* Optional per-request timeout. */
2074 if (timeout_msec > 0) {
2075 struct timeval endtime;
2077 endtime = timeval_current_ofs_msec(timeout_msec);
2078 if (!tevent_req_set_endtime(req, ev, endtime)) {
2086 static void smb2cli_writev_done(struct tevent_req *subreq);
2087 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2088 TALLOC_CTX *tmp_mem,
/*
 * smb2cli_req_compound_submit(): submit one or more SMB2 requests as a
 * single compound PDU. Handles credit charging (SMB2_CAP_LARGE_MTU),
 * message-id assignment, 8-byte padding between compound parts,
 * per-request signing and finally hands the combined iovec to writev.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2091 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2094 struct smbXcli_req_state *state;
2095 struct tevent_req *subreq;
2097 int i, num_iov, nbt_len;
2100 * 1 for the nbt length
2101 * per request: HDR, fixed, dyn, padding
2102 * -1 because the last one does not need padding
2105 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2107 return NT_STATUS_NO_MEMORY;
2113 for (i=0; i<num_reqs; i++) {
2121 bool should_sign = false;
2123 if (!tevent_req_is_in_progress(reqs[i])) {
2124 return NT_STATUS_INTERNAL_ERROR;
2127 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2129 if (!smbXcli_conn_is_connected(state->conn)) {
2130 return NT_STATUS_CONNECTION_DISCONNECTED;
2133 if ((state->conn->protocol != PROTOCOL_NONE) &&
2134 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2135 return NT_STATUS_REVISION_MISMATCH;
/* Guard against message-id wrap-around. */
2138 avail = UINT64_MAX - state->conn->smb2.mid;
2140 return NT_STATUS_CONNECTION_ABORTED;
/* With large MTU: one credit per started 64KiB of dynamic payload. */
2143 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2144 charge = (MAX(state->smb2.dyn_len, 1) - 1)/ 65536 + 1;
2149 charge = MAX(state->smb2.credit_charge, charge);
2151 avail = MIN(avail, state->conn->smb2.cur_credits);
2152 if (avail < charge) {
2153 return NT_STATUS_INTERNAL_ERROR;
/* Ask the server for enough credits to reach max_credits again. */
2157 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
2158 credits = state->conn->smb2.max_credits -
2159 state->conn->smb2.cur_credits;
2161 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
2165 mid = state->conn->smb2.mid;
2166 state->conn->smb2.mid += charge;
2167 state->conn->smb2.cur_credits -= charge;
2169 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2170 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
2172 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
2173 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
/* Append header, fixed body and optional dynamic body to the iovec. */
2176 iov[num_iov].iov_base = state->smb2.hdr;
2177 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2180 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2181 iov[num_iov].iov_len = state->smb2.fixed_len;
2184 if (state->smb2.dyn != NULL) {
2185 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2186 iov[num_iov].iov_len = state->smb2.dyn_len;
2190 reqlen = sizeof(state->smb2.hdr);
2191 reqlen += state->smb2.fixed_len;
2192 reqlen += state->smb2.dyn_len;
/* All but the last part are padded to 8 bytes and chained together. */
2194 if (i < num_reqs-1) {
2195 if ((reqlen % 8) > 0) {
2196 uint8_t pad = 8 - (reqlen % 8);
2197 iov[num_iov].iov_base = state->smb2.pad;
2198 iov[num_iov].iov_len = pad;
2202 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2206 if (state->session) {
2207 should_sign = state->session->smb2.should_sign;
2208 if (state->session->smb2.channel_setup) {
2216 status = smb2_signing_sign_pdu(state->session->smb2.signing_key,
2217 &iov[hdr_iov], num_iov - hdr_iov);
2218 if (!NT_STATUS_IS_OK(status)) {
2223 ret = smbXcli_req_set_pending(reqs[i]);
2225 return NT_STATUS_NO_MEMORY;
/* Prefix the NBT length header and hand everything to writev. */
2229 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2230 _smb_setlen_tcp(state->length_hdr, nbt_len);
2231 iov[0].iov_base = state->length_hdr;
2232 iov[0].iov_len = sizeof(state->length_hdr);
2234 if (state->conn->dispatch_incoming == NULL) {
2235 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2238 subreq = writev_send(state, state->ev, state->conn->outgoing,
2239 state->conn->fd, false, iov, num_iov);
2240 if (subreq == NULL) {
2241 return NT_STATUS_NO_MEMORY;
2243 tevent_req_set_callback(subreq, smb2cli_writev_done, reqs[0]);
2244 return NT_STATUS_OK;
2247 void smb2cli_req_set_credit_charge(struct tevent_req *req, uint16_t charge)
2249 struct smbXcli_req_state *state =
2250 tevent_req_data(req,
2251 struct smbXcli_req_state);
2253 state->smb2.credit_charge = charge;
/*
 * smb2cli_req_send(): convenience wrapper that creates a single SMB2
 * request via smb2cli_req_create() and immediately submits it as a
 * one-element compound.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2256 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2257 struct tevent_context *ev,
2258 struct smbXcli_conn *conn,
2260 uint32_t additional_flags,
2261 uint32_t clear_flags,
2262 uint32_t timeout_msec,
2265 struct smbXcli_session *session,
2266 const uint8_t *fixed,
2271 struct tevent_req *req;
2274 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2275 additional_flags, clear_flags,
2278 fixed, fixed_len, dyn, dyn_len);
/* create() may already have failed/finished (e.g. timeout setup). */
2282 if (!tevent_req_is_in_progress(req)) {
2283 return tevent_req_post(req, ev);
2285 status = smb2cli_req_compound_submit(&req, 1);
2286 if (tevent_req_nterror(req, status)) {
2287 return tevent_req_post(req, ev);
/*
 * smb2cli_writev_done(): completion callback for the compound writev.
 * A failed write tears the whole connection down, which notifies every
 * pending request.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2292 static void smb2cli_writev_done(struct tevent_req *subreq)
2294 struct tevent_req *req =
2295 tevent_req_callback_data(subreq,
2297 struct smbXcli_req_state *state =
2298 tevent_req_data(req,
2299 struct smbXcli_req_state);
2303 nwritten = writev_recv(subreq, &err);
2304 TALLOC_FREE(subreq);
2305 if (nwritten == -1) {
2306 /* here, we need to notify all pending requests */
2307 NTSTATUS status = map_nt_error_from_unix_common(err);
2308 smbXcli_conn_disconnect(state->conn, status);
/*
 * smb2cli_inbuf_parse_compound(): split an incoming NBT-framed buffer
 * into SMB2 PDUs. For each PDU an iovec triple is produced:
 * [0] 64-byte header, [1] fixed body, [2] remaining dynamic bytes.
 * Follows SMB2_HDR_NEXT_COMMAND to walk chained responses and bounds-
 * checks every field; malformed input yields
 * NT_STATUS_INVALID_NETWORK_RESPONSE.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2313 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2314 struct iovec **piov, int *pnum_iov)
2324 iov = talloc_array(mem_ctx, struct iovec, num_iov)
2326 return NT_STATUS_NO_MEMORY;
2329 buflen = smb_len_tcp(buf);
2331 first_hdr = buf + NBT_HDR_SIZE;
2333 while (taken < buflen) {
2334 size_t len = buflen - taken;
2335 uint8_t *hdr = first_hdr + taken;
2338 size_t next_command_ofs;
2340 struct iovec *iov_tmp;
2343 * We need the header plus the body length field
2346 if (len < SMB2_HDR_BODY + 2) {
2347 DEBUG(10, ("%d bytes left, expected at least %d\n",
2348 (int)len, SMB2_HDR_BODY));
2351 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2352 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2356 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2357 DEBUG(10, ("Got HDR len %d, expected %d\n",
2358 SVAL(hdr, 4), SMB2_HDR_BODY));
2363 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2364 body_size = SVAL(hdr, SMB2_HDR_BODY);
/* A chained PDU is delimited by next_command_ofs, else by buffer end. */
2366 if (next_command_ofs != 0) {
2367 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2370 if (next_command_ofs > full_size) {
2373 full_size = next_command_ofs;
/* body_size includes the "dynamic present" low bit; mask it off. */
2375 if (body_size < 2) {
2378 body_size &= 0xfffe;
2380 if (body_size > (full_size - SMB2_HDR_BODY)) {
2384 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2386 if (iov_tmp == NULL) {
2388 return NT_STATUS_NO_MEMORY;
2391 cur = &iov[num_iov];
2394 cur[0].iov_base = hdr;
2395 cur[0].iov_len = SMB2_HDR_BODY;
2396 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2397 cur[1].iov_len = body_size;
2398 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2399 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2405 *pnum_iov = num_iov;
2406 return NT_STATUS_OK;
2410 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * smb2cli_conn_find_pending(): look up the pending request whose SMB2
 * header carries the given message id; used to match incoming responses
 * to their requests.
 *
 * NOTE(review): extraction listing; interior lines (return paths,
 * braces) elided. Code kept byte-identical.
 */
2413 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2416 size_t num_pending = talloc_array_length(conn->pending);
2419 for (i=0; i<num_pending; i++) {
2420 struct tevent_req *req = conn->pending[i];
2421 struct smbXcli_req_state *state =
2422 tevent_req_data(req,
2423 struct smbXcli_req_state);
2425 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * smb2cli_conn_dispatch_incoming(): dispatch an incoming (possibly
 * compound) SMB2 PDU. For every response part: account the granted
 * credits, match the message id to a pending request, handle async
 * interim responses, verify the signature where required, and complete
 * the matching tevent_req. Returns NT_STATUS_RETRY while more pending
 * requests remain, NT_STATUS_OK when the loop may stop; other codes
 * disconnect the connection (see dispatch_incoming contract in the
 * header comment of struct smbXcli_conn).
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2432 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2433 TALLOC_CTX *tmp_mem,
2436 struct tevent_req *req;
2437 struct smbXcli_req_state *state = NULL;
2442 struct smbXcli_session *last_session = NULL;
2444 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2446 if (!NT_STATUS_IS_OK(status)) {
/* Each response part occupies 3 iovecs: header, body, dyn. */
2450 for (i=0; i<num_iov; i+=3) {
2451 uint8_t *inbuf_ref = NULL;
2452 struct iovec *cur = &iov[i];
2453 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2454 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2455 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2456 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2457 uint16_t req_opcode;
2459 uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT);
2460 uint32_t new_credits;
2461 struct smbXcli_session *session = NULL;
2462 const DATA_BLOB *signing_key = NULL;
2463 bool should_sign = false;
/* Credit accounting; reject a grant that would overflow 16 bits. */
2465 new_credits = conn->smb2.cur_credits;
2466 new_credits += credits;
2467 if (new_credits > UINT16_MAX) {
2468 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2470 conn->smb2.cur_credits += credits;
2472 req = smb2cli_conn_find_pending(conn, mid);
2474 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2476 state = tevent_req_data(req, struct smbXcli_req_state);
/* The response must match the request's opcode and be a redirect. */
2478 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2479 if (opcode != req_opcode) {
2480 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2482 req_flags = SVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2484 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2485 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2488 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
/* STATUS_PENDING interim: remember async id and keep request pending. */
2489 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2490 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2491 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2494 * async interim responses are not signed,
2495 * even if the SMB2_HDR_FLAG_SIGNED flag
2498 req_flags |= SMB2_HDR_FLAG_ASYNC;
2499 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2500 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
/* Chained parts reuse the session of the previous part. */
2504 session = state->session;
2505 if (req_flags & SMB2_HDR_FLAG_CHAINED) {
2506 session = last_session;
2508 last_session = session;
2511 should_sign = session->smb2.should_sign;
2512 if (session->smb2.channel_setup) {
2518 if (!(flags & SMB2_HDR_FLAG_SIGNED)) {
2519 return NT_STATUS_ACCESS_DENIED;
/* If signed, find the session (by uid if needed) and its key. */
2523 if (flags & SMB2_HDR_FLAG_SIGNED) {
2524 uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID);
2526 if (session == NULL) {
2527 struct smbXcli_session *s;
2529 s = state->conn->sessions;
2530 for (; s; s = s->next) {
2531 if (s->smb2.session_id != uid) {
2540 if (session == NULL) {
2541 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2544 last_session = session;
2545 signing_key = &session->smb2.signing_key;
2548 if ((opcode == SMB2_OP_SESSSETUP) &&
2549 NT_STATUS_IS_OK(status)) {
2551 * the caller has to check the signing
2552 * as only the caller knows the correct
2558 if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
2560 * if the server returns NT_STATUS_USER_SESSION_DELETED
2561 * the response is not signed and we should
2562 * propagate the NT_STATUS_USER_SESSION_DELETED
2563 * status to the caller.
2570 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_NAME_DELETED) ||
2571 NT_STATUS_EQUAL(status, NT_STATUS_FILE_CLOSED) ||
2572 NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) {
2574 * if the server returns
2575 * NT_STATUS_NETWORK_NAME_DELETED
2576 * NT_STATUS_FILE_CLOSED
2577 * NT_STATUS_INVALID_PARAMETER
2578 * the response might not be signed
2579 * as this happens before the signing checks.
2581 * If server echos the signature (or all zeros)
2582 * we should report the status from the server
2588 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2589 state->smb2.hdr+SMB2_HDR_SIGNATURE,
2592 state->smb2.signing_skipped = true;
2598 static const uint8_t zeros[16];
2600 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2604 state->smb2.signing_skipped = true;
2611 status = smb2_signing_check_pdu(*signing_key, cur, 3);
2612 if (!NT_STATUS_IS_OK(status)) {
2614 * If the signing check fails, we disconnect
2621 smbXcli_req_unset_pending(req);
2624 * There might be more than one response
2625 * we need to defer the notifications
2627 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
2632 tevent_req_defer_callback(req, state->ev);
2636 * Note: here we use talloc_reference() in a way
2637 * that does not expose it to the caller.
2639 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2640 if (tevent_req_nomem(inbuf_ref, req)) {
2644 /* copy the related buffers */
2645 state->smb2.recv_iov[0] = cur[0];
2646 state->smb2.recv_iov[1] = cur[1];
2647 state->smb2.recv_iov[2] = cur[2];
2649 tevent_req_done(req);
2653 return NT_STATUS_RETRY;
2656 return NT_STATUS_OK;
/*
 * smb2cli_req_recv(): collect the result of an SMB2 request. Validates
 * the received status/body-size pair against the caller-supplied
 * expected[] list, rejects responses whose signing was skipped unless
 * the server returned an error status, and hands the response iovec
 * triple to the caller via *piov.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2659 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2660 struct iovec **piov,
2661 const struct smb2cli_req_expected_response *expected,
2662 size_t num_expected)
2664 struct smbXcli_req_state *state =
2665 tevent_req_data(req,
2666 struct smbXcli_req_state);
2669 bool found_status = false;
2670 bool found_size = false;
/* Transport errors are only acceptable if explicitly expected. */
2677 if (tevent_req_is_nterror(req, &status)) {
2678 for (i=0; i < num_expected; i++) {
2679 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2680 found_status = true;
2686 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2692 if (num_expected == 0) {
2693 found_status = true;
/* Status is in the response header, body size in the fixed body. */
2697 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2698 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2700 for (i=0; i < num_expected; i++) {
2701 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2705 found_status = true;
2706 if (expected[i].body_size == 0) {
2711 if (expected[i].body_size == body_size) {
2717 if (!found_status) {
/* A skipped signature check is only tolerated for error statuses. */
2721 if (state->smb2.signing_skipped) {
2722 if (num_expected > 0) {
2723 return NT_STATUS_ACCESS_DENIED;
2725 if (!NT_STATUS_IS_ERR(status)) {
2726 return NT_STATUS_ACCESS_DENIED;
2731 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2735 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
/*
 * Mapping of internal protocol levels to the SMB1 negprot dialect
 * strings sent on the wire, ordered from oldest to newest. Several
 * levels list multiple dialect aliases. The trailing closing brace of
 * the table is elided in this extraction listing.
 */
2741 static const struct {
2742 enum protocol_types proto;
2743 const char *smb1_name;
2744 } smb1cli_prots[] = {
2745 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
2746 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
2747 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
2748 {PROTOCOL_LANMAN1, "LANMAN1.0"},
2749 {PROTOCOL_LANMAN2, "LM1.2X002"},
2750 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
2751 {PROTOCOL_LANMAN2, "LANMAN2.1"},
2752 {PROTOCOL_LANMAN2, "Samba"},
2753 {PROTOCOL_NT1, "NT LANMAN 1.0"},
2754 {PROTOCOL_NT1, "NT LM 0.12"},
2755 {PROTOCOL_SMB2_02, "SMB 2.002"},
2756 {PROTOCOL_SMB2_10, "SMB 2.???"},
/*
 * Mapping of internal protocol levels to the 16-bit SMB2 dialect
 * revision codes used in SMB2 negotiation. The trailing closing brace
 * of the table is elided in this extraction listing.
 */
2759 static const struct {
2760 enum protocol_types proto;
2761 uint16_t smb2_dialect;
2762 } smb2cli_prots[] = {
2763 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
2764 {PROTOCOL_SMB2_10, SMB2_DIALECT_REVISION_210},
2765 {PROTOCOL_SMB2_22, SMB2_DIALECT_REVISION_222},
/*
 * Per-request state of the protocol negotiation; carries the requested
 * protocol range and a buffer big enough for one 2-byte dialect code
 * per entry in smb2cli_prots[]. Interior fields are elided in this
 * extraction listing.
 */
2768 struct smbXcli_negprot_state {
2769 struct smbXcli_conn *conn;
2770 struct tevent_context *ev;
2771 uint32_t timeout_msec;
2772 enum protocol_types min_protocol;
2773 enum protocol_types max_protocol;
2777 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
2781 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
2782 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
2783 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
2784 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
2785 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
2786 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
/*
 * smbXcli_negprot_send(): start protocol negotiation for the given
 * [min_protocol, max_protocol] range. Pure SMB1 ranges send an SMB1
 * negprot, pure SMB2 ranges an SMB2 negprot; a mixed range sends an
 * SMB1 negprot carrying the SMB2 dialects and lets
 * smbXcli_negprot_dispatch_incoming() pick the matching callback based
 * on the response.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2790 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
2791 struct tevent_context *ev,
2792 struct smbXcli_conn *conn,
2793 uint32_t timeout_msec,
2794 enum protocol_types min_protocol,
2795 enum protocol_types max_protocol)
2797 struct tevent_req *req, *subreq;
2798 struct smbXcli_negprot_state *state;
2800 req = tevent_req_create(mem_ctx, &state,
2801 struct smbXcli_negprot_state);
2807 state->timeout_msec = timeout_msec;
2808 state->min_protocol = min_protocol;
2809 state->max_protocol = max_protocol;
/* Both range ends must be set and ordered. */
2811 if (min_protocol == PROTOCOL_NONE) {
2812 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2813 return tevent_req_post(req, ev);
2816 if (max_protocol == PROTOCOL_NONE) {
2817 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2818 return tevent_req_post(req, ev);
2821 if (min_protocol > max_protocol) {
2822 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2823 return tevent_req_post(req, ev);
/* SMB1-only range: plain SMB1 negprot. */
2826 if ((min_protocol < PROTOCOL_SMB2_02) &&
2827 (max_protocol < PROTOCOL_SMB2_02)) {
2831 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
2833 subreq = smbXcli_negprot_smb1_subreq(state);
2834 if (tevent_req_nomem(subreq, req)) {
2835 return tevent_req_post(req, ev);
2837 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
/* SMB2-only range: plain SMB2 negprot. */
2841 if ((min_protocol >= PROTOCOL_SMB2_02) &&
2842 (max_protocol >= PROTOCOL_SMB2_02)) {
2846 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2848 subreq = smbXcli_negprot_smb2_subreq(state);
2849 if (tevent_req_nomem(subreq, req)) {
2850 return tevent_req_post(req, ev);
2852 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
2857 * We send an SMB1 negprot with the SMB2 dialects
2858 * and expect a SMB1 or a SMB2 response.
2860 * smbXcli_negprot_dispatch_incoming() will fix the
2861 * callback to match protocol of the response.
2863 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
2865 subreq = smbXcli_negprot_smb1_subreq(state);
2866 if (tevent_req_nomem(subreq, req)) {
2867 return tevent_req_post(req, ev);
2869 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
/*
 * smbXcli_negprot_invalid_done(): callback for the mixed SMB1/SMB2
 * negprot path. It should never be reached with a successful subreq,
 * because smbXcli_negprot_dispatch_incoming() rewires the callback to
 * the protocol-specific handler first; a success here is an internal
 * error.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2873 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
2875 struct tevent_req *req =
2876 tevent_req_callback_data(subreq,
2881 * we just want the low level error
2883 status = tevent_req_simple_recv_ntstatus(subreq);
2884 TALLOC_FREE(subreq);
2885 if (tevent_req_nterror(req, status)) {
2889 /* this should never happen */
2890 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
/*
 * smbXcli_negprot_smb1_subreq(): build and send the SMB1 SMBnegprot
 * request. The byte buffer is a sequence of dialect entries, each a
 * one-byte type marker followed by the NUL-terminated ASCII dialect
 * string, filtered to the requested protocol range.
 *
 * NOTE(review): extraction listing; interior lines elided. Code kept
 * byte-identical.
 */
2893 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
2896 DATA_BLOB bytes = data_blob_null;
2900 /* setup the protocol strings */
2901 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
/* Skip dialects outside the caller's protocol range. */
2905 if (smb1cli_prots[i].proto < state->min_protocol) {
2909 if (smb1cli_prots[i].proto > state->max_protocol) {
2913 ok = data_blob_append(state, &bytes, &c, sizeof(c));
2919 * We now it is already ascii and
2920 * we want NULL termination.
2922 ok = data_blob_append(state, &bytes,
2923 smb1cli_prots[i].smb1_name,
2924 strlen(smb1cli_prots[i].smb1_name)+1);
2930 smb1cli_req_flags(state->max_protocol,
2931 state->conn->smb1.client.capabilities,
2936 return smb1cli_req_send(state, state->ev, state->conn,
2940 state->timeout_msec,
2941 0xFFFE, 0, 0, /* pid, tid, uid */
2942 0, NULL, /* wct, vwv */
2943 bytes.length, bytes.data);
2946 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
2948 struct tevent_req *req =
2949 tevent_req_callback_data(subreq,
2951 struct smbXcli_negprot_state *state =
2952 tevent_req_data(req,
2953 struct smbXcli_negprot_state);
2954 struct smbXcli_conn *conn = state->conn;
2955 struct iovec *recv_iov = NULL;
2964 size_t num_prots = 0;
2966 uint32_t client_capabilities = conn->smb1.client.capabilities;
2967 uint32_t both_capabilities;
2968 uint32_t server_capabilities = 0;
2969 uint32_t capabilities;
2970 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
2971 uint32_t server_max_xmit = 0;
2973 uint32_t server_max_mux = 0;
2974 uint16_t server_security_mode = 0;
2975 uint32_t server_session_key = 0;
2976 bool server_readbraw = false;
2977 bool server_writebraw = false;
2978 bool server_lockread = false;
2979 bool server_writeunlock = false;
2980 struct GUID server_guid = GUID_zero();
2981 DATA_BLOB server_gss_blob = data_blob_null;
2982 uint8_t server_challenge[8];
2983 char *server_workgroup = NULL;
2984 char *server_name = NULL;
2985 int server_time_zone = 0;
2986 NTTIME server_system_time = 0;
2987 static const struct smb1cli_req_expected_response expected[] = {
2989 .status = NT_STATUS_OK,
2990 .wct = 0x11, /* NT1 */
2993 .status = NT_STATUS_OK,
2994 .wct = 0x0D, /* LM */
2997 .status = NT_STATUS_OK,
2998 .wct = 0x01, /* CORE */
3002 ZERO_STRUCT(server_challenge);
3004 status = smb1cli_req_recv(subreq, state,
3009 NULL, /* pvwv_offset */
3012 NULL, /* pbytes_offset */
3014 expected, ARRAY_SIZE(expected));
3015 TALLOC_FREE(subreq);
3016 if (tevent_req_nterror(req, status)) {
3020 flags = CVAL(inhdr, HDR_FLG);
3022 protnum = SVAL(vwv, 0);
3024 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3025 if (smb1cli_prots[i].proto < state->min_protocol) {
3029 if (smb1cli_prots[i].proto > state->max_protocol) {
3033 if (protnum != num_prots) {
3038 conn->protocol = smb1cli_prots[i].proto;
3042 if (conn->protocol == PROTOCOL_NONE) {
3043 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3047 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
3048 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
3049 "and the selected protocol level doesn't support it.\n"));
3050 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3054 if (flags & FLAG_SUPPORT_LOCKREAD) {
3055 server_lockread = true;
3056 server_writeunlock = true;
3059 if (conn->protocol >= PROTOCOL_NT1) {
3060 const char *client_signing = NULL;
3061 bool server_mandatory = false;
3062 bool server_allowed = false;
3063 const char *server_signing = NULL;
3068 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3073 server_security_mode = CVAL(vwv + 1, 0);
3074 server_max_mux = SVAL(vwv + 1, 1);
3075 server_max_xmit = IVAL(vwv + 3, 1);
3076 server_session_key = IVAL(vwv + 7, 1);
3077 server_time_zone = SVALS(vwv + 15, 1);
3078 server_time_zone *= 60;
3079 /* this time arrives in real GMT */
3080 server_system_time = BVAL(vwv + 11, 1);
3081 server_capabilities = IVAL(vwv + 9, 1);
3083 key_len = CVAL(vwv + 16, 1);
3085 if (server_capabilities & CAP_RAW_MODE) {
3086 server_readbraw = true;
3087 server_writebraw = true;
3089 if (server_capabilities & CAP_LOCK_AND_READ) {
3090 server_lockread = true;
3093 if (server_capabilities & CAP_EXTENDED_SECURITY) {
3094 DATA_BLOB blob1, blob2;
3096 if (num_bytes < 16) {
3097 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3101 blob1 = data_blob_const(bytes, 16);
3102 status = GUID_from_data_blob(&blob1, &server_guid);
3103 if (tevent_req_nterror(req, status)) {
3107 blob1 = data_blob_const(bytes+16, num_bytes-16);
3108 blob2 = data_blob_dup_talloc(state, blob1);
3109 if (blob1.length > 0 &&
3110 tevent_req_nomem(blob2.data, req)) {
3113 server_gss_blob = blob2;
3115 DATA_BLOB blob1, blob2;
3117 if (num_bytes < key_len) {
3118 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3122 if (key_len != 0 && key_len != 8) {
3123 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3128 memcpy(server_challenge, bytes, 8);
3131 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3132 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
3133 if (blob1.length > 0) {
3136 len = utf16_len_n(blob1.data,
3140 ok = convert_string_talloc(state,
3148 status = map_nt_error_from_unix_common(errno);
3149 tevent_req_nterror(req, status);
3154 blob2.data += blob1.length;
3155 blob2.length -= blob1.length;
3156 if (blob2.length > 0) {
3159 len = utf16_len_n(blob1.data,
3163 ok = convert_string_talloc(state,
3171 status = map_nt_error_from_unix_common(errno);
3172 tevent_req_nterror(req, status);
3178 client_signing = "disabled";
3179 if (conn->allow_signing) {
3180 client_signing = "allowed";
3182 if (conn->mandatory_signing) {
3183 client_signing = "required";
3186 server_signing = "not supported";
3187 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
3188 server_signing = "supported";
3189 server_allowed = true;
3191 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
3192 server_signing = "required";
3193 server_mandatory = true;
3196 ok = smb_signing_set_negotiated(conn->smb1.signing,
3200 DEBUG(1,("cli_negprot: SMB signing is required, "
3201 "but client[%s] and server[%s] mismatch\n",
3202 client_signing, server_signing));
3203 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3207 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
3213 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3217 server_security_mode = SVAL(vwv + 1, 0);
3218 server_max_xmit = SVAL(vwv + 2, 0);
3219 server_max_mux = SVAL(vwv + 3, 0);
3220 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
3221 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
3222 server_session_key = IVAL(vwv + 6, 0);
3223 server_time_zone = SVALS(vwv + 10, 0);
3224 server_time_zone *= 60;
3225 /* this time is converted to GMT by make_unix_date */
3226 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
3227 unix_to_nt_time(&server_system_time, t);
3228 key_len = SVAL(vwv + 11, 0);
3230 if (num_bytes < key_len) {
3231 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3235 if (key_len != 0 && key_len != 8) {
3236 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3241 memcpy(server_challenge, bytes, 8);
3244 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3245 if (blob1.length > 0) {
3249 len = utf16_len_n(blob1.data,
3253 ok = convert_string_talloc(state,
3261 status = map_nt_error_from_unix_common(errno);
3262 tevent_req_nterror(req, status);
3268 /* the old core protocol */
3269 server_time_zone = get_time_zone(time(NULL));
3270 server_max_xmit = 1024;
3274 if (server_max_xmit < 1024) {
3275 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3279 if (server_max_mux < 1) {
3280 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3285 * Now calculate the negotiated capabilities
3286 * based on the mask for:
3287 * - client only flags
3288 * - flags used in both directions
3289 * - server only flags
3291 both_capabilities = client_capabilities & server_capabilities;
3292 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3293 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3294 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3296 max_xmit = MIN(client_max_xmit, server_max_xmit);
3298 conn->smb1.server.capabilities = server_capabilities;
3299 conn->smb1.capabilities = capabilities;
3301 conn->smb1.server.max_xmit = server_max_xmit;
3302 conn->smb1.max_xmit = max_xmit;
3304 conn->smb1.server.max_mux = server_max_mux;
3306 conn->smb1.server.security_mode = server_security_mode;
3308 conn->smb1.server.readbraw = server_readbraw;
3309 conn->smb1.server.writebraw = server_writebraw;
3310 conn->smb1.server.lockread = server_lockread;
3311 conn->smb1.server.writeunlock = server_writeunlock;
3313 conn->smb1.server.session_key = server_session_key;
3315 talloc_steal(conn, server_gss_blob.data);
3316 conn->smb1.server.gss_blob = server_gss_blob;
3317 conn->smb1.server.guid = server_guid;
3318 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3319 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3320 conn->smb1.server.name = talloc_move(conn, &server_name);
3322 conn->smb1.server.time_zone = server_time_zone;
3323 conn->smb1.server.system_time = server_system_time;
3325 tevent_req_done(req);
/*
 * Build and send one SMB2 NEGPROT request.
 *
 * Fills state->smb2.dyn with the 16-bit dialect codes of every entry in
 * smb2cli_prots[] that lies inside the caller's
 * [min_protocol, max_protocol] window, then fills the fixed request body
 * in state->smb2.fixed (dialect count, client security mode, ClientGuid)
 * and hands both to smb2cli_req_send().
 */
3328 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3332 uint16_t dialect_count = 0;
3334 buf = state->smb2.dyn;
3335 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
/* Skip dialects outside the requested protocol range. */
3336 if (smb2cli_prots[i].proto < state->min_protocol) {
3340 if (smb2cli_prots[i].proto > state->max_protocol) {
/* Append this dialect to the variable-length dialect array. */
3344 SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
/* Now fill the fixed-size SMB2 NEGOTIATE request body. */
3348 buf = state->smb2.fixed;
3350 SSVAL(buf, 2, dialect_count);
3351 SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3352 SSVAL(buf, 6, 0); /* Reserved */
3353 SSVAL(buf, 8, 0); /* Capabilities */
3354 if (state->max_protocol >= PROTOCOL_SMB2_10) {
/* SMB 2.1 and higher carry a real client GUID. */
3358 status = GUID_to_ndr_blob(&state->conn->smb2.client.guid,
3360 if (!NT_STATUS_IS_OK(status)) {
3363 memcpy(buf+12, blob.data, 16); /* ClientGuid */
/* Pre-2.1 dialects send an all-zero ClientGuid. */
3365 memset(buf+12, 0, 16); /* ClientGuid */
3367 SBVAL(buf, 28, 0); /* ClientStartTime */
3369 return smb2cli_req_send(state, state->ev,
3370 state->conn, SMB2_OP_NEGPROT,
3372 state->timeout_msec,
3373 0xFEFF, 0, NULL, /* pid, tid, session */
3374 state->smb2.fixed, sizeof(state->smb2.fixed),
3375 state->smb2.dyn, dialect_count*2);
/*
 * Completion handler for the SMB2 NEGPROT request.
 *
 * Parses the server's NEGOTIATE response: picks the negotiated protocol
 * from the DialectRevision, and on the SMB2.??? wildcard answer
 * (SMB2_DIALECT_REVISION_2FF) re-sends a real SMB2 negprot. On success
 * it records the server's security mode, GUID, capabilities, transfer
 * limits, timestamps and GSS security blob on the connection.
 */
3378 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3380 struct tevent_req *req =
3381 tevent_req_callback_data(subreq,
3383 struct smbXcli_negprot_state *state =
3384 tevent_req_data(req,
3385 struct smbXcli_negprot_state);
3386 struct smbXcli_conn *conn = state->conn;
3387 size_t security_offset, security_length;
3393 uint16_t dialect_revision;
3394 static const struct smb2cli_req_expected_response expected[] = {
3396 .status = NT_STATUS_OK,
3401 status = smb2cli_req_recv(subreq, state, &iov,
3402 expected, ARRAY_SIZE(expected));
3403 TALLOC_FREE(subreq);
3404 if (tevent_req_nterror(req, status)) {
3408 body = (uint8_t *)iov[1].iov_base;
3410 dialect_revision = SVAL(body, 4);
/* Map the server's DialectRevision back to our protocol enum. */
3412 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3413 if (smb2cli_prots[i].proto < state->min_protocol) {
3417 if (smb2cli_prots[i].proto > state->max_protocol) {
3421 if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3425 conn->protocol = smb2cli_prots[i].proto;
3429 if (conn->protocol == PROTOCOL_NONE) {
/* An exact SMB2 dialect was requested but not offered back. */
3430 if (state->min_protocol >= PROTOCOL_SMB2_02) {
3431 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* Anything other than the SMB2.??? wildcard is a protocol error. */
3435 if (dialect_revision != SMB2_DIALECT_REVISION_2FF) {
3436 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3440 /* make sure we do not loop forever */
3441 state->min_protocol = PROTOCOL_SMB2_02;
3444 * send a SMB2 negprot, in order to negotiate
3445 * the SMB2 dialect. This needs to use the
3448 state->conn->smb2.mid = 1;
3449 subreq = smbXcli_negprot_smb2_subreq(state);
3450 if (tevent_req_nomem(subreq, req)) {
3453 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
/* Record the server's negotiated parameters on the connection. */
3457 conn->smb2.server.security_mode = SVAL(body, 2);
3459 blob = data_blob_const(body + 8, 16);
3460 status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3461 if (tevent_req_nterror(req, status)) {
3465 conn->smb2.server.capabilities = IVAL(body, 24);
3466 conn->smb2.server.max_trans_size= IVAL(body, 28);
3467 conn->smb2.server.max_read_size = IVAL(body, 32);
3468 conn->smb2.server.max_write_size= IVAL(body, 36);
3469 conn->smb2.server.system_time = BVAL(body, 40);
3470 conn->smb2.server.start_time = BVAL(body, 48);
3472 security_offset = SVAL(body, 56);
3473 security_length = SVAL(body, 58);
/* The security buffer must directly follow the fixed body ... */
3475 if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3476 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* ... and must fit inside the received dynamic part. */
3480 if (security_length > iov[2].iov_len) {
3481 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* Keep a connection-lifetime copy of the GSS negotiation blob. */
3485 conn->smb2.server.gss_blob = data_blob_talloc(conn,
3488 if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3492 tevent_req_done(req);
/*
 * Temporary incoming-PDU dispatcher used only during protocol
 * negotiation.
 *
 * A multi-protocol negprot may be answered with either an SMB1 or an
 * SMB2 PDU; this function inspects the protocol magic of the first
 * response, rewires the pending request's callback and the connection's
 * dispatch_incoming hook to the matching flavour, and forwards the
 * buffer to that dispatcher. Exactly one request must be pending.
 */
3495 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3496 TALLOC_CTX *tmp_mem,
3499 size_t num_pending = talloc_array_length(conn->pending);
3500 struct tevent_req *subreq;
3501 struct smbXcli_req_state *substate;
3502 struct tevent_req *req;
3503 struct smbXcli_negprot_state *state;
3504 uint32_t protocol_magic = IVAL(inbuf, 4);
3506 if (num_pending != 1) {
3507 return NT_STATUS_INTERNAL_ERROR;
3510 subreq = conn->pending[0];
3511 substate = tevent_req_data(subreq, struct smbXcli_req_state);
3512 req = tevent_req_callback_data(subreq, struct tevent_req);
3513 state = tevent_req_data(req, struct smbXcli_negprot_state);
3515 switch (protocol_magic) {
/* SMB1 reply: hand over to the SMB1 dispatcher from now on. */
3517 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3518 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3519 return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/* SMB2 reply: the request was sent as SMB1, so the receive iov
 * still lives in the smb1 slot and has to move to smb2. */
3522 if (substate->smb2.recv_iov == NULL) {
3524 * For the SMB1 negprot we have move it.
3526 substate->smb2.recv_iov = substate->smb1.recv_iov;
3527 substate->smb1.recv_iov = NULL;
3530 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3531 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3532 return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3535 DEBUG(10, ("Got non-SMB PDU\n"));
3536 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3539 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
3541 return tevent_req_simple_recv_ntstatus(req);
/*
 * Synchronous wrapper around smbXcli_negprot_send()/_recv().
 *
 * Negotiates a protocol in the range [min_protocol, max_protocol] on
 * the given connection, driving a private tevent context until the
 * async request completes. Must not be called while other async
 * requests are in flight on this connection.
 */
3544 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
3545 uint32_t timeout_msec,
3546 enum protocol_types min_protocol,
3547 enum protocol_types max_protocol)
3549 TALLOC_CTX *frame = talloc_stackframe();
3550 struct tevent_context *ev;
3551 struct tevent_req *req;
3552 NTSTATUS status = NT_STATUS_NO_MEMORY;
3555 if (smbXcli_conn_has_async_calls(conn)) {
3557 * Can't use sync call while an async call is in flight
3559 status = NT_STATUS_INVALID_PARAMETER_MIX;
3562 ev = tevent_context_init(frame);
3566 req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
3567 min_protocol, max_protocol);
/* Drive the event loop until the request is done. */
3571 ok = tevent_req_poll(req, ev);
/* tevent_req_poll() failed at the system level; report errno. */
3573 status = map_nt_error_from_unix_common(errno);
3576 status = smbXcli_negprot_recv(req);
3582 static int smbXcli_session_destructor(struct smbXcli_session *session)
3584 if (session->conn == NULL) {
3588 DLIST_REMOVE(session->conn->sessions, session);
3592 struct smbXcli_session *smbXcli_session_create(TALLOC_CTX *mem_ctx,
3593 struct smbXcli_conn *conn)
3595 struct smbXcli_session *session;
3597 session = talloc_zero(mem_ctx, struct smbXcli_session);
3598 if (session == NULL) {
3601 talloc_set_destructor(session, smbXcli_session_destructor);
3603 DLIST_ADD_END(conn->sessions, session, struct smbXcli_session *);
3604 session->conn = conn;
3609 uint8_t smb2cli_session_security_mode(struct smbXcli_session *session)
3611 struct smbXcli_conn *conn = session->conn;
3612 uint8_t security_mode = 0;
3615 return security_mode;
3618 security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
3619 if (conn->mandatory_signing) {
3620 security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
3623 return security_mode;
3626 uint64_t smb2cli_session_current_id(struct smbXcli_session *session)
3628 return session->smb2.session_id;
3631 void smb2cli_session_set_id_and_flags(struct smbXcli_session *session,
3632 uint64_t session_id,
3633 uint16_t session_flags)
3635 session->smb2.session_id = session_id;
3636 session->smb2.session_flags = session_flags;
/*
 * Finalize the session's signing state after a successful
 * SESSION_SETUP.
 *
 * Verifies the signature of the final session setup response
 * (recv_iov, 3 iovecs) with the appropriate key, stores the session
 * key on first setup, and decides whether outgoing PDUs should be
 * signed based on client desire and server requirements. Guest and
 * anonymous (NULL) sessions never sign.
 */
3639 NTSTATUS smb2cli_session_update_session_key(struct smbXcli_session *session,
3640 const DATA_BLOB session_key,
3641 const struct iovec *recv_iov)
3643 struct smbXcli_conn *conn = session->conn;
3644 uint16_t no_sign_flags;
3645 DATA_BLOB signing_key;
3649 return NT_STATUS_INVALID_PARAMETER_MIX;
3652 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
/* Guest and NULL sessions have no key material to sign with. */
3654 if (session->smb2.session_flags & no_sign_flags) {
3655 session->smb2.should_sign = false;
3656 return NT_STATUS_OK;
/* Prefer an already-established signing key over the fresh
 * session key ... */
3659 if (session->smb2.signing_key.length > 0) {
3660 signing_key = session->smb2.signing_key;
3662 signing_key = session_key;
/* ... except during channel binding, where the new channel's
 * session key must be used for verification. */
3664 if (session->smb2.channel_setup) {
3665 signing_key = session_key;
/* Verify the server signed the final session setup response. */
3668 status = smb2_signing_check_pdu(signing_key, recv_iov, 3);
3669 if (!NT_STATUS_IS_OK(status)) {
/* Keep a copy of the session key on the initial setup only. */
3673 if (!session->smb2.channel_setup) {
3674 session->smb2.session_key = data_blob_dup_talloc(session,
3676 if (session->smb2.session_key.data == NULL) {
3677 return NT_STATUS_NO_MEMORY;
/* Channel setup is complete: drop the temporary signing key. */
3681 if (session->smb2.channel_setup) {
3682 data_blob_free(&session->smb2.signing_key);
3683 session->smb2.channel_setup = false;
/* An existing signing key stays in place. */
3686 if (session->smb2.signing_key.length > 0) {
3687 return NT_STATUS_OK;
3690 session->smb2.signing_key = data_blob_dup_talloc(session, signing_key);
3691 if (session->smb2.signing_key.data == NULL) {
3692 return NT_STATUS_NO_MEMORY;
/* Sign if the client wants to or the server demands it. */
3695 session->smb2.should_sign = false;
3697 if (conn->desire_signing) {
3698 session->smb2.should_sign = true;
3701 if (conn->smb2.server.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
3702 session->smb2.should_sign = true;
3705 return NT_STATUS_OK;
/*
 * Create a second channel (session binding) for an existing SMB2
 * session on another connection.
 *
 * session1 must be a fully established, signing-capable session: not
 * guest/NULL, and with both a session key and a signing key. The new
 * session object copies id, flags and keys from session1, is marked
 * as being in channel setup, and is linked to the new connection.
 * On success *_session2 receives the new session (allocated on
 * mem_ctx).
 */
3708 NTSTATUS smb2cli_session_create_channel(TALLOC_CTX *mem_ctx,
3709 struct smbXcli_session *session1,
3710 struct smbXcli_conn *conn,
3711 struct smbXcli_session **_session2)
3713 struct smbXcli_session *session2;
3714 uint16_t no_sign_flags;
3716 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
/* Guest/NULL sessions cannot be channel-bound (no key material). */
3718 if (session1->smb2.session_flags & no_sign_flags) {
3719 return NT_STATUS_INVALID_PARAMETER_MIX;
3722 if (session1->smb2.session_key.length == 0) {
3723 return NT_STATUS_INVALID_PARAMETER_MIX;
3726 if (session1->smb2.signing_key.length == 0) {
3727 return NT_STATUS_INVALID_PARAMETER_MIX;
3731 return NT_STATUS_INVALID_PARAMETER_MIX;
3734 session2 = talloc_zero(mem_ctx, struct smbXcli_session);
3735 if (session2 == NULL) {
3736 return NT_STATUS_NO_MEMORY;
/* The channel shares the id and flags of the primary session. */
3738 session2->smb2.session_id = session1->smb2.session_id;
3739 session2->smb2.session_flags = session1->smb2.session_flags;
3741 session2->smb2.session_key = data_blob_dup_talloc(session2,
3742 session1->smb2.session_key);
3743 if (session2->smb2.session_key.data == NULL) {
3744 return NT_STATUS_NO_MEMORY;
3747 session2->smb2.signing_key = data_blob_dup_talloc(session2,
3748 session1->smb2.signing_key);
3749 if (session2->smb2.signing_key.data == NULL) {
3750 return NT_STATUS_NO_MEMORY;
3753 session2->smb2.should_sign = session1->smb2.should_sign;
/* Mark the session so the key handling in
 * smb2cli_session_update_session_key() treats the next
 * session setup as a channel binding. */
3754 session2->smb2.channel_setup = true;
3756 talloc_set_destructor(session2, smbXcli_session_destructor);
3757 DLIST_ADD_END(conn->sessions, session2, struct smbXcli_session *);
3758 session2->conn = conn;
3760 *_session2 = session2;
3761 return NT_STATUS_OK;