2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "lib/util/dlinklist.h"
28 #include "../libcli/smb/smb_common.h"
29 #include "../libcli/smb/smb_seal.h"
30 #include "../libcli/smb/smb_signing.h"
31 #include "../libcli/smb/read_smb.h"
32 #include "smbXcli_base.h"
33 #include "librpc/ndr/libndr.h"
37 struct smbXcli_session;
	/* cached socket addresses of both ends of the connection */
	struct sockaddr_storage local_ss;
	struct sockaddr_storage remote_ss;
	/* server name the caller gave us; talloc child of the conn */
	const char *remote_name;

	/* queue serializing outgoing PDUs on the socket */
	struct tevent_queue *outgoing;
	/* talloc array of requests waiting for a server reply */
	struct tevent_req **pending;
	/* the tevent_req reading the next PDU off the socket, if any */
	struct tevent_req *read_smb_req;

	/* negotiated dialect; PROTOCOL_NONE until negotiation completed */
	enum protocol_types protocol;

	bool mandatory_signing;

	/*
	 * The incoming dispatch function should return:
	 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
	 * - NT_STATUS_OK, if no more processing is desired, e.g.
	 *                 the dispatch function called
	 * - All other return values disconnect the connection.
	 */
	NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,

	/*
	 * NOTE(review): the enclosing smb1/smb2 client/server sub-struct
	 * headers were lost in extraction; the members below belong to
	 * those nested structs (see uses like conn->smb1.client.capabilities
	 * elsewhere in this file) — confirm against the full source.
	 */
	uint32_t capabilities;

	uint32_t capabilities;

	uint16_t security_mode;

	const char *workgroup;

	uint32_t capabilities;

	struct smb_signing_state *signing;
	struct smb_trans_enc_state *trans_enc;

	uint16_t security_mode;

	uint32_t capabilities;
	uint16_t security_mode;

	uint32_t max_trans_size;
	uint32_t max_read_size;
	uint32_t max_write_size;

	/* SMB2 credit accounting */
	uint16_t cur_credits;
	uint16_t max_credits;

	/* linked list of sessions on this connection */
	struct smbXcli_session *sessions;
/* One SMB session (authentication context) on a connection. */
struct smbXcli_session {
	struct smbXcli_session *prev, *next;	/* links for conn->sessions */
	struct smbXcli_conn *conn;		/* owning conn; NULLed when the conn dies */

	uint16_t session_flags;
	DATA_BLOB signing_key;
	DATA_BLOB session_key;
/* Per-request state shared by the SMB1 and SMB2 code paths. */
struct smbXcli_req_state {
	struct tevent_context *ev;
	struct smbXcli_conn *conn;
	struct smbXcli_session *session; /* maybe NULL */

	/* 4-byte NBT length prefix sent ahead of the PDU */
	uint8_t length_hdr[4];

	/* Space for the header including the wct */
	uint8_t hdr[HDR_VWV];

	/*
	 * For normal requests, smb1cli_req_send chooses a mid.
	 * SecondaryV trans requests need to use the mid of the primary
	 * request, so we need a place to store it.
	 * Assume it is set if != 0.
	 */

	/* wire-format byte count (bcc) field */
	uint8_t bytecount_buf[2];

#define MAX_SMB_IOV 5
	/* length_hdr, hdr, words, byte_count, buffers */
	struct iovec iov[1 + 3 + MAX_SMB_IOV];

	/* secondary requests of an AndX chain, owned by the primary */
	struct tevent_req **chained_requests;

	NTSTATUS recv_status;
	/* always an array of 3 talloc elements */
	struct iovec *recv_iov;

	const uint8_t *fixed;

	uint8_t pad[7]; /* padding space for compounding */

	/* always an array of 3 talloc elements */
	struct iovec *recv_iov;

	uint16_t credit_charge;

	bool signing_skipped;
/* Talloc destructor: disconnect quietly and detach surviving sessions. */
static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
	/*
	 * NT_STATUS_OK, means we do not notify the callers
	 */
	smbXcli_conn_disconnect(conn, NT_STATUS_OK);

	/* sessions may outlive the connection object; just unlink them */
	while (conn->sessions) {
		conn->sessions->conn = NULL;
		DLIST_REMOVE(conn->sessions, conn->sessions);

	if (conn->smb1.trans_enc) {
		common_free_encryption_state(&conn->smb1.trans_enc);
/*
 * Create a client connection object around an existing socket fd.
 * The fd is dup()ed for the write side; signing policy and (for SMB2)
 * the client GUID are initialised from the caller's parameters.
 * Returns NULL on failure.
 */
struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
					 const char *remote_name,
					 enum smb_signing_setting signing_state,
					 uint32_t smb1_capabilities,
					 struct GUID *client_guid)
	struct smbXcli_conn *conn = NULL;
	struct sockaddr *sa = NULL;

	conn = talloc_zero(mem_ctx, struct smbXcli_conn);

	/* separate write fd so read/write sides can be closed independently */
	conn->write_fd = dup(fd);
	if (conn->write_fd == -1) {

	conn->remote_name = talloc_strdup(conn, remote_name);
	if (conn->remote_name == NULL) {

	/* cache the local socket address */
	ss = (void *)&conn->local_ss;
	sa = (struct sockaddr *)ss;
	sa_length = sizeof(conn->local_ss);
	ret = getsockname(fd, sa, &sa_length);

	/* cache the remote socket address */
	ss = (void *)&conn->remote_ss;
	sa = (struct sockaddr *)ss;
	sa_length = sizeof(conn->remote_ss);
	ret = getpeername(fd, sa, &sa_length);

	conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
	if (conn->outgoing == NULL) {

	conn->pending = NULL;

	conn->protocol = PROTOCOL_NONE;

	/* map the caller's signing policy onto allow/desire/mandatory */
	switch (signing_state) {
	case SMB_SIGNING_OFF:
		conn->allow_signing = false;
		conn->desire_signing = false;
		conn->mandatory_signing = false;
	case SMB_SIGNING_DEFAULT:
	case SMB_SIGNING_IF_REQUIRED:
		/* if the server requires it */
		conn->allow_signing = true;
		conn->desire_signing = false;
		conn->mandatory_signing = false;
	case SMB_SIGNING_REQUIRED:
		conn->allow_signing = true;
		conn->desire_signing = true;
		conn->mandatory_signing = true;

	conn->smb1.client.capabilities = smb1_capabilities;
	conn->smb1.client.max_xmit = UINT16_MAX;

	conn->smb1.capabilities = conn->smb1.client.capabilities;
	/* conservative default until negotiation raises it */
	conn->smb1.max_xmit = 1024;

	/* initialise signing */
	conn->smb1.signing = smb_signing_init(conn,
					      conn->desire_signing,
					      conn->mandatory_signing);
	if (!conn->smb1.signing) {

	conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	if (conn->mandatory_signing) {
		conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;

	conn->smb2.client.guid = *client_guid;

	/* SMB2 starts out with a single credit */
	conn->smb2.cur_credits = 1;
	conn->smb2.max_credits = 0;

	talloc_set_destructor(conn, smbXcli_conn_destructor);

	/* error path: release the dup()ed write fd */
	if (conn->write_fd != -1) {
		close(conn->write_fd);
/* True while the connection's read side is still open. */
bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
	if (conn->read_fd == -1) {
348 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
350 return conn->protocol;
/* Unicode is implicit for SMB2+; for SMB1 it depends on CAP_UNICODE. */
bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
	if (conn->protocol >= PROTOCOL_SMB2_02) {

	if (conn->smb1.capabilities & CAP_UNICODE) {
366 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
368 set_socket_options(conn->read_fd, options);
371 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
373 return &conn->local_ss;
376 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
378 return &conn->remote_ss;
381 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
383 return conn->remote_name;
/*
 * Maximum number of outstanding requests.  For SMB1 this is the
 * server's advertised max_mux; the SMB2 branch is not fully visible
 * in this extract.
 */
uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
	if (conn->protocol >= PROTOCOL_SMB2_02) {

	return conn->smb1.server.max_mux;

/* Server system time as reported during protocol negotiation. */
NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
	if (conn->protocol >= PROTOCOL_SMB2_02) {
		return conn->smb2.server.system_time;

	return conn->smb1.server.system_time;

/* Initial GSS/SPNEGO blob from the server's negotiate response. */
const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
	if (conn->protocol >= PROTOCOL_SMB2_02) {
		return &conn->smb2.server.gss_blob;

	return &conn->smb1.server.gss_blob;

/* Server GUID from the negotiate response. */
const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
	if (conn->protocol >= PROTOCOL_SMB2_02) {
		return &conn->smb2.server.guid;

	return &conn->smb1.server.guid;
/* Plain accessors for SMB1 negotiation results. */
uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
	return conn->smb1.capabilities;

uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
	return conn->smb1.max_xmit;

uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
	return conn->smb1.server.session_key;

/* Challenge for non-extended-security authentication. */
const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
	return conn->smb1.server.challenge;

uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
	return conn->smb1.server.security_mode;

int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
	return conn->smb1.server.time_zone;
/* Turn on SMB1 packet signing once the session key is available. */
bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
				   const DATA_BLOB user_session_key,
				   const DATA_BLOB response)
	return smb_signing_activate(conn->smb1.signing,

/* Verify an incoming PDU's signature against the expected seqnum. */
bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
				const uint8_t *buf, uint32_t seqnum)
	return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);

bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
	return smb_signing_is_active(conn->smb1.signing);

/* Install a new SMB1 transport-encryption state, taking ownership of es. */
void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
				 struct smb_trans_enc_state *es)
	/* Replace the old state, if any. */
	if (conn->smb1.trans_enc) {
		common_free_encryption_state(&conn->smb1.trans_enc);

	conn->smb1.trans_enc = es;

bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
	return common_encryption_on(conn->smb1.trans_enc);
/*
 * Pull the raw error out of an SMB1 header.  Depending on
 * FLAGS2_32_BIT_ERROR_CODES the wire carries either a 32-bit NTSTATUS
 * or a DOS error class/code pair at HDR_RCLS.
 */
static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
	uint32_t flags2 = SVAL(hdr, HDR_FLG2);
	NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));

	if (NT_STATUS_IS_OK(status)) {

	if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {

	/* DOS-style error: class byte plus 16-bit code */
	return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));

/**
 * Is the SMB command able to hold an AND_X successor
 * @param[in] cmd	The SMB command in question
 * @retval Can we add a chained request after "cmd"?
 */
bool smb1cli_is_andx_req(uint8_t cmd)
/*
 * Allocate a fresh SMB1 mid, skipping 0 and 0xffff (both reserved)
 * and any mid still used by a pending request.
 */
static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
	size_t num_pending = talloc_array_length(conn->pending);

	result = conn->smb1.mid++;
	if ((result == 0) || (result == 0xffff)) {

	for (i=0; i<num_pending; i++) {
		if (result == smb1cli_req_mid(conn->pending[i])) {

	/* no pending request uses this mid: take it */
	if (i == num_pending) {
/*
 * Remove a request from conn->pending.  When the last request goes
 * away the outstanding socket read is dropped as well.
 */
void smbXcli_req_unset_pending(struct tevent_req *req)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);
	struct smbXcli_conn *conn = state->conn;
	size_t num_pending = talloc_array_length(conn->pending);

	if (state->smb1.mid != 0) {
		/*
		 * This is a [nt]trans[2] request which waits
		 * for more than one reply.
		 */

	talloc_set_destructor(req, NULL);

	if (num_pending == 1) {
		/*
		 * The pending read_smb tevent_req is a child of
		 * conn->pending. So if nothing is pending anymore, we need to
		 * delete the socket read fde.
		 */
		TALLOC_FREE(conn->pending);
		conn->read_smb_req = NULL;

	for (i=0; i<num_pending; i++) {
		if (req == conn->pending[i]) {

	if (i == num_pending) {
		/*
		 * Something's seriously broken. Just returning here is the
		 * right thing nevertheless, the point of this routine is to
		 * remove ourselves from conn->pending.
		 */

	/*
	 * Remove ourselves from the conn->pending array
	 */
	for (; i < (num_pending - 1); i++) {
		conn->pending[i] = conn->pending[i+1];

	/*
	 * No NULL check here, we're shrinking by sizeof(void *), and
	 * talloc_realloc just adjusts the size for this.
	 */
	conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
/* Talloc destructor for requests: drop them from conn->pending. */
static int smbXcli_req_destructor(struct tevent_req *req)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);

	/*
	 * Make sure we really remove it from
	 * the pending array on destruction.
	 */
	smbXcli_req_unset_pending(req);

static bool smb1cli_req_cancel(struct tevent_req *req);
static bool smb2cli_req_cancel(struct tevent_req *req);

/* Dispatch a cancel to the protocol-specific handler. */
static bool smbXcli_req_cancel(struct tevent_req *req)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);

	if (!smbXcli_conn_is_connected(state->conn)) {

	/* nothing negotiated yet: no wire-level cancel possible */
	if (state->conn->protocol == PROTOCOL_NONE) {

	if (state->conn->protocol >= PROTOCOL_SMB2_02) {
		return smb2cli_req_cancel(req);

	return smb1cli_req_cancel(req);
static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);

/*
 * Add a request to conn->pending and make sure a socket read is
 * outstanding.  On failure the request is unset again and the whole
 * connection is torn down.
 */
bool smbXcli_req_set_pending(struct tevent_req *req)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);
	struct smbXcli_conn *conn;
	struct tevent_req **pending;

	if (!smbXcli_conn_is_connected(conn)) {

	num_pending = talloc_array_length(conn->pending);

	pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
	if (pending == NULL) {

	pending[num_pending] = req;
	conn->pending = pending;
	talloc_set_destructor(req, smbXcli_req_destructor);
	tevent_req_set_cancel_fn(req, smbXcli_req_cancel);

	if (!smbXcli_conn_receive_next(conn)) {
		/*
		 * the caller should notify the current request
		 *
		 * And all other pending requests get notified
		 * by smbXcli_conn_disconnect().
		 */
		smbXcli_req_unset_pending(req);
		smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
static void smbXcli_conn_received(struct tevent_req *subreq);

/* Ensure a read_smb request is outstanding while requests are pending. */
static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
	size_t num_pending = talloc_array_length(conn->pending);
	struct tevent_req *req;
	struct smbXcli_req_state *state;

	/* a read is already in flight */
	if (conn->read_smb_req != NULL) {

	if (num_pending == 0) {
		if (conn->smb2.mid < UINT64_MAX) {
			/* no more pending requests, so we are done for now */

		/*
		 * If there are no more SMB2 requests possible,
		 * because we are out of message ids,
		 * we need to disconnect.
		 */
		smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);

	req = conn->pending[0];
	state = tevent_req_data(req, struct smbXcli_req_state);

	/*
	 * We're the first ones, add the read_smb request that waits for the
	 * answer from the server
	 */
	conn->read_smb_req = read_smb_send(conn->pending,
	if (conn->read_smb_req == NULL) {

	tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
/*
 * Close both socket fds and fail every pending request (and any
 * chained secondaries) with the given status.  NT_STATUS_OK means
 * "silent": callers are not notified.
 */
void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
	tevent_queue_stop(conn->outgoing);

	if (conn->read_fd != -1) {
		close(conn->read_fd);

	if (conn->write_fd != -1) {
		close(conn->write_fd);

	/*
	 * Cancel all pending requests. We do not do a for-loop walking
	 * conn->pending because that array changes in
	 * smbXcli_req_unset_pending.
	 */
	while (talloc_array_length(conn->pending) > 0) {
		struct tevent_req *req;
		struct smbXcli_req_state *state;
		struct tevent_req **chain;

		req = conn->pending[0];
		state = tevent_req_data(req, struct smbXcli_req_state);

		if (state->smb1.chained_requests == NULL) {
			/*
			 * We're dead. No point waiting for trans2
			 */

			smbXcli_req_unset_pending(req);

			if (NT_STATUS_IS_OK(status)) {
				/* do not notify the callers */

			/*
			 * we need to defer the callback, because we may notify
			 * more than one caller.
			 */
			tevent_req_defer_callback(req, state->ev);
			tevent_req_nterror(req, status);

		/* fail every request of the AndX chain as well */
		chain = talloc_move(conn, &state->smb1.chained_requests);
		num_chained = talloc_array_length(chain);

		for (i=0; i<num_chained; i++) {
			state = tevent_req_data(req, struct smbXcli_req_state);

			/*
			 * We're dead. No point waiting for trans2
			 */

			smbXcli_req_unset_pending(req);

			if (NT_STATUS_IS_OK(status)) {
				/* do not notify the callers */

			/*
			 * we need to defer the callback, because we may notify
			 * more than one caller.
			 */
			tevent_req_defer_callback(req, state->ev);
			tevent_req_nterror(req, status);
/*
 * Fetch a smb request's mid. Only valid after the request has been sent by
 * smb1cli_req_send().
 */
uint16_t smb1cli_req_mid(struct tevent_req *req)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);

	/* an explicitly assigned mid (trans secondaries) takes precedence */
	if (state->smb1.mid != 0) {
		return state->smb1.mid;

	return SVAL(state->smb1.hdr, HDR_MID);

/* Force a specific mid (trans secondaries, cancel requests). */
void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);

	state->smb1.mid = mid;

/* Signing sequence number assigned when the request was sent. */
uint32_t smb1cli_req_seqnum(struct tevent_req *req)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);

	return state->smb1.seqnum;

void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);

	state->smb1.seqnum = seqnum;
/* Sum of all iovec element lengths. */
static size_t smbXcli_iov_len(const struct iovec *iov, int count)
	for (i=0; i<count; i++) {
		result += iov[i].iov_len;

/* Flatten an iovec array into one talloc'ed buffer; NULL on OOM. */
static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
				   const struct iovec *iov,
	size_t len = smbXcli_iov_len(iov, count);

	buf = talloc_array(mem_ctx, uint8_t, len);

	for (i=0; i<count; i++) {
		memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
		copied += iov[i].iov_len;
/*
 * Compute the SMB1 header flags/flags2 for a request from the
 * negotiated protocol level and capabilities, then apply the caller's
 * additional/clear masks on top.
 */
static void smb1cli_req_flags(enum protocol_types protocol,
			      uint32_t smb1_capabilities,
			      uint8_t additional_flags,
			      uint16_t additional_flags2,
			      uint16_t clear_flags2,
	if (protocol >= PROTOCOL_LANMAN1) {
		flags |= FLAG_CASELESS_PATHNAMES;
		flags |= FLAG_CANONICAL_PATHNAMES;

	if (protocol >= PROTOCOL_LANMAN2) {
		flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
		flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;

	if (protocol >= PROTOCOL_NT1) {
		flags2 |= FLAGS2_IS_LONG_NAME;

		if (smb1_capabilities & CAP_UNICODE) {
			flags2 |= FLAGS2_UNICODE_STRINGS;

		if (smb1_capabilities & CAP_STATUS32) {
			flags2 |= FLAGS2_32_BIT_ERROR_CODES;

		if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
			flags2 |= FLAGS2_EXTENDED_SECURITY;

	/* caller-requested overrides are applied last */
	flags |= additional_flags;
	flags &= ~clear_flags;
	flags2 |= additional_flags2;
	flags2 &= ~clear_flags2;
static void smb1cli_req_cancel_done(struct tevent_req *subreq);

/*
 * Send an SMB1 cancel mirroring the addressing fields
 * (flags/pid/tid/uid/mid) of the request being cancelled.  The reply
 * to the cancel itself is ignored.
 */
static bool smb1cli_req_cancel(struct tevent_req *req)
	struct smbXcli_req_state *state =
		struct smbXcli_req_state);
	struct tevent_req *subreq;

	/* copy addressing fields from the original request header */
	flags = CVAL(state->smb1.hdr, HDR_FLG);
	flags2 = SVAL(state->smb1.hdr, HDR_FLG2);
	pid = SVAL(state->smb1.hdr, HDR_PID);
	pid |= SVAL(state->smb1.hdr, HDR_PIDHIGH)<<16;
	tid = SVAL(state->smb1.hdr, HDR_TID);
	uid = SVAL(state->smb1.hdr, HDR_UID);
	mid = SVAL(state->smb1.hdr, HDR_MID);

	subreq = smb1cli_req_create(state, state->ev,
				    0, NULL); /* bytes */
	if (subreq == NULL) {

	/* the cancel must carry the mid of the request it cancels */
	smb1cli_req_set_mid(subreq, mid);

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (!NT_STATUS_IS_OK(status)) {

	smb1cli_req_set_mid(subreq, 0);

	tevent_req_set_callback(subreq, smb1cli_req_cancel_done, NULL);

static void smb1cli_req_cancel_done(struct tevent_req *subreq)
	/* we do not care about the result */
	TALLOC_FREE(subreq);
/*
 * Build (but do not send) an SMB1 request: fill in the fixed header,
 * the word vector and the byte iovecs, arm the timeout, and mark
 * one-way commands that never get a reply.
 */
struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
				      struct tevent_context *ev,
				      struct smbXcli_conn *conn,
				      uint8_t smb_command,
				      uint8_t additional_flags,
				      uint8_t clear_flags,
				      uint16_t additional_flags2,
				      uint16_t clear_flags2,
				      uint32_t timeout_msec,
				      uint8_t wct, uint16_t *vwv,
				      struct iovec *bytes_iov)
	struct tevent_req *req;
	struct smbXcli_req_state *state;
	uint16_t flags2 = 0;

	if (iov_count > MAX_SMB_IOV) {
		/*
		 * Should not happen :-)
		 */

	req = tevent_req_create(mem_ctx, &state,
				struct smbXcli_req_state);

	/* defaults until a reply is parsed */
	state->smb1.recv_cmd = 0xFF;
	state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
	state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
	if (state->smb1.recv_iov == NULL) {

	smb1cli_req_flags(conn->protocol,
			  conn->smb1.capabilities,

	/* fixed-size SMB1 header */
	SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
	SCVAL(state->smb1.hdr, HDR_COM, smb_command);
	SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
	SCVAL(state->smb1.hdr, HDR_FLG, flags);
	SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
	SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
	SSVAL(state->smb1.hdr, HDR_TID, tid);
	SSVAL(state->smb1.hdr, HDR_PID, pid);
	SSVAL(state->smb1.hdr, HDR_UID, uid);
	SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
	SCVAL(state->smb1.hdr, HDR_WCT, wct);

	state->smb1.vwv = vwv;

	SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));

	/* iov[0..3]: NBT length, header, words, byte count */
	state->smb1.iov[0].iov_base = (void *)state->length_hdr;
	state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
	state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
	state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
	state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
	state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
	state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
	state->smb1.iov[3].iov_len = sizeof(uint16_t);

	if (iov_count != 0) {
		memcpy(&state->smb1.iov[4], bytes_iov,
		       iov_count * sizeof(*bytes_iov));

	state->smb1.iov_count = iov_count + 4;

	if (timeout_msec > 0) {
		struct timeval endtime;

		endtime = timeval_current_ofs_msec(timeout_msec);
		if (!tevent_req_set_endtime(req, ev, endtime)) {

	/* mark commands that never receive a reply */
	switch (smb_command) {
		state->one_way = true;

		state->one_way = true;
		state->smb1.one_way_seqnum = true;

		    (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
			state->one_way = true;
/*
 * Sign an outgoing PDU.  The iovec layout is validated (NBT header,
 * SMB header, words, byte count), the vector is flattened into one
 * buffer, signed, and the signed header is copied back in place.
 */
static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
				   struct iovec *iov, int iov_count,
				   bool one_way_seqnum)
	TALLOC_CTX *frame = NULL;

	/*
	 * Obvious optimization: Make cli_calculate_sign_mac work with struct
	 * iovec directly. MD5Update would do that just fine.
	 */

	if (iov_count < 4) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[0].iov_len != NBT_HDR_SIZE) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[3].iov_len != sizeof(uint16_t)) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	frame = talloc_stackframe();

	buf = smbXcli_iov_concat(frame, iov, iov_count);
		return NT_STATUS_NO_MEMORY;

	*seqnum = smb_signing_next_seqnum(conn->smb1.signing,
	smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
	/* copy the now-signed header back into the caller's iovec */
	memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);

	return NT_STATUS_OK;
1163 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1164 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1165 TALLOC_CTX *tmp_mem,
/*
 * Finalize and queue a request for transmission: pick a mid, set the
 * NBT length, sign and (optionally) encrypt, then hand the iovec to
 * writev on the serialized outgoing queue.
 */
static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
					  struct smbXcli_req_state *state,
					  struct iovec *iov, int iov_count)
	struct tevent_req *subreq;

	if (!smbXcli_conn_is_connected(state->conn)) {
		return NT_STATUS_CONNECTION_DISCONNECTED;

	/* this code path is SMB1 only */
	if (state->conn->protocol > PROTOCOL_NT1) {
		return NT_STATUS_REVISION_MISMATCH;

	if (iov_count < 4) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[0].iov_len != NBT_HDR_SIZE) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	if (iov[3].iov_len != sizeof(uint16_t)) {
		return NT_STATUS_INVALID_PARAMETER_MIX;

	/* use a preset mid (trans secondaries/cancel) or allocate one */
	if (state->smb1.mid != 0) {
		mid = state->smb1.mid;
		mid = smb1cli_alloc_mid(state->conn);

	SSVAL(iov[1].iov_base, HDR_MID, mid);

	/* NBT length covers everything after the 4-byte length field */
	_smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));

	status = smb1cli_conn_signv(state->conn, iov, iov_count,
				    &state->smb1.seqnum,
				    state->smb1.one_way_seqnum);

	if (!NT_STATUS_IS_OK(status)) {

	/*
	 * If we supported multiple encryption contexts
	 * here we'd look up based on tid.
	 */
	if (common_encryption_on(state->conn->smb1.trans_enc)) {
		char *buf, *enc_buf;

		buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
			return NT_STATUS_NO_MEMORY;

		status = common_encrypt_buffer(state->conn->smb1.trans_enc,
					       (char *)buf, &enc_buf);

		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("Error in encrypting client message: %s\n",
				  nt_errstr(status)));

		/* replace the iovec with one single encrypted buffer */
		buf = (char *)talloc_memdup(state, enc_buf,
					    smb_len_nbt(enc_buf)+4);

			return NT_STATUS_NO_MEMORY;

		iov[0].iov_base = (void *)buf;
		iov[0].iov_len = talloc_get_size(buf);

	if (state->conn->dispatch_incoming == NULL) {
		state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;

	tevent_req_set_cancel_fn(req, smbXcli_req_cancel);

	subreq = writev_send(state, state->ev, state->conn->outgoing,
			     state->conn->write_fd, false, iov, iov_count);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;

	tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
	return NT_STATUS_OK;
/* Create and immediately submit a single (non-chained) SMB1 request. */
struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct smbXcli_conn *conn,
				    uint8_t smb_command,
				    uint8_t additional_flags,
				    uint8_t clear_flags,
				    uint16_t additional_flags2,
				    uint16_t clear_flags2,
				    uint32_t timeout_msec,
				    uint8_t wct, uint16_t *vwv,
				    const uint8_t *bytes)
	struct tevent_req *req;

	iov.iov_base = discard_const_p(void, bytes);
	iov.iov_len = num_bytes;

	req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
				 additional_flags, clear_flags,
				 additional_flags2, clear_flags2,

	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);

	/* a chain of length one is a plain submit */
	status = smb1cli_req_chain_submit(&req, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
/* writev completion: one-way requests finish here, others go pending. */
static void smb1cli_req_writev_done(struct tevent_req *subreq)
	struct tevent_req *req =
		tevent_req_callback_data(subreq,
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);

	nwritten = writev_recv(subreq, &err);
	TALLOC_FREE(subreq);
	if (nwritten == -1) {
		/* a write failure kills the whole connection */
		NTSTATUS status = map_nt_error_from_unix_common(err);
		smbXcli_conn_disconnect(state->conn, status);

	if (state->one_way) {
		state->inbuf = NULL;
		tevent_req_done(req);

	/* wait for the server's reply */
	if (!smbXcli_req_set_pending(req)) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
/*
 * read_smb completion: hand the received PDU to the protocol dispatch
 * function and, if it asks for more (NT_STATUS_RETRY), re-arm the read.
 */
static void smbXcli_conn_received(struct tevent_req *subreq)
	struct smbXcli_conn *conn =
		tevent_req_callback_data(subreq,
		struct smbXcli_conn);
	TALLOC_CTX *frame = talloc_stackframe();

	if (subreq != conn->read_smb_req) {
		DEBUG(1, ("Internal error: cli_smb_received called with "
			  "unexpected subreq\n"));
		status = NT_STATUS_INTERNAL_ERROR;
		smbXcli_conn_disconnect(conn, status);

	conn->read_smb_req = NULL;

	received = read_smb_recv(subreq, frame, &inbuf, &err);
	TALLOC_FREE(subreq);
	if (received == -1) {
		status = map_nt_error_from_unix_common(err);
		smbXcli_conn_disconnect(conn, status);

	status = conn->dispatch_incoming(conn, frame, inbuf);

	if (NT_STATUS_IS_OK(status)) {
		/*
		 * We should not do any more processing
		 * as the dispatch function called
		 * tevent_req_done().
		 */
	} else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got an error, so notify all pending requests
		 */
		smbXcli_conn_disconnect(conn, status);

	/*
	 * We got NT_STATUS_RETRY, so we may ask for a
	 * next incoming pdu.
	 */
	if (!smbXcli_conn_receive_next(conn)) {
		smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
/*
 * Parse a (possibly AndX-chained) SMB1 reply into an iovec array:
 * iov[0] is the header, followed by one (words, bytes) pair per
 * chained command.  Trailing bytes beyond the parsed chain are added
 * to the last DATA block (needed for CAP_LARGE_READX/WRITEX).
 */
static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
					  struct iovec **piov, int *pnum_iov)
	buflen = smb_len_nbt(buf);

	hdr = buf + NBT_HDR_SIZE;

	if (buflen < MIN_SMB_SIZE) {
		return NT_STATUS_INVALID_NETWORK_RESPONSE;

	/*
	 * This returns iovec elements in the following order:
	 */
	iov = talloc_array(mem_ctx, struct iovec, num_iov);
		return NT_STATUS_NO_MEMORY;

	iov[0].iov_base = hdr;
	iov[0].iov_len = HDR_WCT;

	cmd = CVAL(hdr, HDR_COM);

	/* walk the AndX chain */
		size_t len = buflen - taken;
		struct iovec *iov_tmp;

		/*
		 * we need at least WCT and BCC
		 */
		needed = sizeof(uint8_t) + sizeof(uint16_t);
			DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
				   __location__, (int)len, (int)needed));

		/*
		 * Now we check if the specified words are there
		 */
		wct = CVAL(hdr, wct_ofs);
		needed += wct * sizeof(uint16_t);
			DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
				   __location__, (int)len, (int)needed));

		/*
		 * Now we check if the specified bytes are there
		 */
		bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
		bcc = SVAL(hdr, bcc_ofs);
		needed += bcc * sizeof(uint8_t);
			DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
				   __location__, (int)len, (int)needed));

		/*
		 * we allocate 2 iovec structures for words and bytes
		 */
		iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
		if (iov_tmp == NULL) {
			return NT_STATUS_NO_MEMORY;

		cur = &iov[num_iov];

		cur[0].iov_len = wct * sizeof(uint16_t);
		cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
		cur[1].iov_len = bcc * sizeof(uint8_t);
		cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));

		if (!smb1cli_is_andx_req(cmd)) {
			/*
			 * If the current command does not have AndX chaining
			 */

		if (wct == 0 && bcc == 0) {
			/*
			 * An empty response also ends the chain,
			 * most likely with an error.
			 */

			DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
				   __location__, (int)wct, (int)cmd));

		/* next command in the chain */
		cmd = CVAL(cur[0].iov_base, 0);

		/*
		 * If it is the end of the chain we are also done.
		 */

		wct_ofs = SVAL(cur[0].iov_base, 2);

		/* the next offset must not point backwards or past the PDU */
		if (wct_ofs < taken) {
			return NT_STATUS_INVALID_NETWORK_RESPONSE;

		if (wct_ofs > buflen) {
			return NT_STATUS_INVALID_NETWORK_RESPONSE;

		/*
		 * we consumed everything up to the start of the next
		 */

	remaining = buflen - taken;

	if (remaining > 0 && num_iov >= 3) {
		/*
		 * The last DATA block gets the remaining
		 * bytes, this is needed to support
		 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
		 */
		iov[num_iov-1].iov_len += remaining;

	*pnum_iov = num_iov;
	return NT_STATUS_OK;

	return NT_STATUS_INVALID_NETWORK_RESPONSE;
1565 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1566 TALLOC_CTX *tmp_mem,
1569 struct tevent_req *req;
/*
 * NOTE(review): tail of the SMB1 incoming-PDU dispatch routine; its
 * signature is above this extract and intermediate lines are missing
 * from the visible sample.  Validates, optionally decrypts and
 * signature-checks an incoming SMB1 PDU, matches it to a pending
 * request by MID, and completes that request (or a whole AndX chain).
 * Returns NT_STATUS_RETRY while more PDUs are expected.
 */
1570 struct smbXcli_req_state *state;
1577 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1578 struct iovec *iov = NULL;
1580 struct tevent_req **chain = NULL;
1581 size_t num_chained = 0;
1582 size_t num_responses = 0;
/* Reject anything that is neither an SMB nor an SMBecho-style PDU. */
1584 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1585 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1586 DEBUG(10, ("Got non-SMB PDU\n"));
1587 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1591 * If we supported multiple encrytion contexts
1592 * here we'd look up based on tid.
/* Decrypt in place when transport encryption is active on this conn. */
1594 if (common_encryption_on(conn->smb1.trans_enc)
1595 && (CVAL(inbuf, 0) == 0)) {
1596 uint16_t enc_ctx_num;
1598 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1599 if (!NT_STATUS_IS_OK(status)) {
1600 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1601 nt_errstr(status)));
1605 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1606 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1608 conn->smb1.trans_enc->enc_ctx_num))_PLACEHOLDER;
1609 return NT_STATUS_INVALID_HANDLE;
1612 status = common_decrypt_buffer(conn->smb1.trans_enc,
1614 if (!NT_STATUS_IS_OK(status)) {
1615 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1616 nt_errstr(status)));
/* Find the pending request this reply belongs to, by MID. */
1621 mid = SVAL(inhdr, HDR_MID);
1622 num_pending = talloc_array_length(conn->pending);
1624 for (i=0; i<num_pending; i++) {
1625 if (mid == smb1cli_req_mid(conn->pending[i])) {
1629 if (i == num_pending) {
1630 /* Dump unexpected reply */
1631 return NT_STATUS_RETRY;
1634 oplock_break = false;
1636 if (mid == 0xffff) {
1638 * Paranoia checks that this is really an oplock break request.
1640 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1641 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1642 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1643 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1644 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1646 if (!oplock_break) {
1647 /* Dump unexpected reply */
1648 return NT_STATUS_RETRY;
1652 req = conn->pending[i];
1653 state = tevent_req_data(req, struct smbXcli_req_state);
/* Verify the SMB1 signature against the expected sequence number. */
1655 if (!oplock_break /* oplock breaks are not signed */
1656 && !smb_signing_check_pdu(conn->smb1.signing,
1657 inbuf, state->smb1.seqnum+1)) {
1658 DEBUG(10, ("cli_check_sign_mac failed\n"));
1659 return NT_STATUS_ACCESS_DENIED;
1662 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1664 if (!NT_STATUS_IS_OK(status)) {
1665 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1666 nt_errstr(status)));
1670 cmd = CVAL(inhdr, HDR_COM);
1671 status = smb1cli_pull_raw_error(inhdr);
/* Simple (non-chained) request: hand the parsed iovecs to its state. */
1673 if (state->smb1.chained_requests == NULL) {
1675 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1678 smbXcli_req_unset_pending(req);
1680 state->smb1.recv_cmd = cmd;
1681 state->smb1.recv_status = status;
1682 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1684 state->smb1.recv_iov[0] = iov[0];
1685 state->smb1.recv_iov[1] = iov[1];
1686 state->smb1.recv_iov[2] = iov[2];
1688 if (talloc_array_length(conn->pending) == 0) {
1689 tevent_req_done(req);
1690 return NT_STATUS_OK;
1693 tevent_req_defer_callback(req, state->ev);
1694 tevent_req_done(req);
1695 return NT_STATUS_RETRY;
/* AndX chain: distribute the response iovecs over the chained reqs. */
1698 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1699 num_chained = talloc_array_length(chain);
1700 num_responses = (num_iov - 1)/2;
1702 if (num_responses > num_chained) {
1703 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1706 for (i=0; i<num_chained; i++) {
1707 size_t iov_idx = 1 + (i*2);
1708 struct iovec *cur = &iov[iov_idx];
1712 state = tevent_req_data(req, struct smbXcli_req_state);
1714 smbXcli_req_unset_pending(req);
1717 * as we finish multiple requests here
1718 * we need to defer the callbacks as
1719 * they could destroy our current stack state.
1721 tevent_req_defer_callback(req, state->ev);
/* Requests beyond the responses the server sent are aborted. */
1723 if (i >= num_responses) {
1724 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1728 state->smb1.recv_cmd = cmd;
1730 if (i == (num_responses - 1)) {
1732 * The last request in the chain gets the status
1734 state->smb1.recv_status = status;
1736 cmd = CVAL(cur[0].iov_base, 0);
1737 state->smb1.recv_status = NT_STATUS_OK;
1740 state->inbuf = inbuf;
1743 * Note: here we use talloc_reference() in a way
1744 * that does not expose it to the caller.
1746 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1747 if (tevent_req_nomem(inbuf_ref, req)) {
1751 /* copy the related buffers */
1752 state->smb1.recv_iov[0] = iov[0];
1753 state->smb1.recv_iov[1] = cur[0];
1754 state->smb1.recv_iov[2] = cur[1];
1756 tevent_req_done(req);
1759 return NT_STATUS_RETRY;
/*
 * Collect the result of a completed SMB1 request.
 *
 * Hands out the received header/vwv/bytes areas via the optional out
 * parameters (each may be NULL) and validates the reply against the
 * caller-supplied table of expected (status, wct) pairs; a reply that
 * matches none of them yields NT_STATUS_INVALID_NETWORK_RESPONSE.
 * Ownership of the iovec array moves to mem_ctx via talloc_move().
 * (Some lines are missing from this extract.)
 */
1762 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1763 TALLOC_CTX *mem_ctx,
1764 struct iovec **piov,
1768 uint32_t *pvwv_offset,
1769 uint32_t *pnum_bytes,
1771 uint32_t *pbytes_offset,
1773 const struct smb1cli_req_expected_response *expected,
1774 size_t num_expected)
1776 struct smbXcli_req_state *state =
1777 tevent_req_data(req,
1778 struct smbXcli_req_state);
1779 NTSTATUS status = NT_STATUS_OK;
1780 struct iovec *recv_iov = NULL;
1781 uint8_t *hdr = NULL;
1783 uint32_t vwv_offset = 0;
1784 uint16_t *vwv = NULL;
1785 uint32_t num_bytes = 0;
1786 uint32_t bytes_offset = 0;
1787 uint8_t *bytes = NULL;
1789 bool found_status = false;
1790 bool found_size = false;
/* Pre-initialize all optional out parameters the caller passed. */
1804 if (pvwv_offset != NULL) {
1807 if (pnum_bytes != NULL) {
1810 if (pbytes != NULL) {
1813 if (pbytes_offset != NULL) {
1816 if (pinbuf != NULL) {
/* Derive hdr/vwv/bytes views from the 3-element recv_iov layout:
 * [0] SMB header, [1] word values, [2] byte buffer. */
1820 if (state->inbuf != NULL) {
1821 recv_iov = state->smb1.recv_iov;
1822 hdr = (uint8_t *)recv_iov[0].iov_base;
1823 wct = recv_iov[1].iov_len/2;
1824 vwv = (uint16_t *)recv_iov[1].iov_base;
1825 vwv_offset = PTR_DIFF(vwv, hdr);
1826 num_bytes = recv_iov[2].iov_len;
1827 bytes = (uint8_t *)recv_iov[2].iov_base;
1828 bytes_offset = PTR_DIFF(bytes, hdr);
/* Transport-level error: only accept it if the caller listed it. */
1831 if (tevent_req_is_nterror(req, &status)) {
1832 for (i=0; i < num_expected; i++) {
1833 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1834 found_status = true;
1840 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* An empty expectation table accepts any reply. */
1846 if (num_expected == 0) {
1847 found_status = true;
1851 status = state->smb1.recv_status;
/* Match received status and word count against the expectations;
 * wct == 0 in the table means "any word count". */
1853 for (i=0; i < num_expected; i++) {
1854 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1858 found_status = true;
1859 if (expected[i].wct == 0) {
1864 if (expected[i].wct == wct) {
1870 if (!found_status) {
1875 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Transfer results to the caller. */
1879 *piov = talloc_move(mem_ctx, &recv_iov);
1891 if (pvwv_offset != NULL) {
1892 *pvwv_offset = vwv_offset;
1894 if (pnum_bytes != NULL) {
1895 *pnum_bytes = num_bytes;
1897 if (pbytes != NULL) {
1900 if (pbytes_offset != NULL) {
1901 *pbytes_offset = bytes_offset;
1903 if (pinbuf != NULL) {
1904 *pinbuf = state->inbuf;
/*
 * Compute the offset of the word-count (wct) field of the *last*
 * request in an AndX chain, by summing the payload lengths of all
 * requests and rounding each up to a 4-byte boundary.
 * (Initialization of wct_ofs and the return are outside this extract.)
 */
1910 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1917 for (i=0; i<num_reqs; i++) {
1918 struct smbXcli_req_state *state;
1919 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* iov[0]/iov[1] are NBT and SMB header; payload starts at iov[2]. */
1920 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1921 state->smb1.iov_count-2);
/* round up to the next multiple of 4 */
1922 wct_ofs = (wct_ofs + 3) & ~3;
/*
 * Submit a batch of SMB1 requests as one AndX chain on the wire.
 *
 * Builds a single iovec array that concatenates all requests, patching
 * each request's AndX command/offset words to point at its successor
 * (4-byte aligned), and writes the result in one writev.  A single
 * request bypasses chaining entirely.  The list of chained requests is
 * remembered on the first request's state so the dispatcher can
 * complete them all when the combined reply arrives.
 * (Some lines are missing from this extract.)
 */
1927 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1929 struct smbXcli_req_state *first_state =
1930 tevent_req_data(reqs[0],
1931 struct smbXcli_req_state);
1932 struct smbXcli_req_state *state;
1934 size_t chain_padding = 0;
1936 struct iovec *iov = NULL;
1937 struct iovec *this_iov;
/* Fast path: no chaining needed for a single request. */
1941 if (num_reqs == 1) {
1942 return smb1cli_req_writev_submit(reqs[0], first_state,
1943 first_state->smb1.iov,
1944 first_state->smb1.iov_count);
/* Validate every request and size the combined iovec array. */
1948 for (i=0; i<num_reqs; i++) {
1949 if (!tevent_req_is_in_progress(reqs[i])) {
1950 return NT_STATUS_INTERNAL_ERROR;
1953 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1955 if (state->smb1.iov_count < 4) {
1956 return NT_STATUS_INVALID_PARAMETER_MIX;
1961 * The NBT and SMB header
1974 iovlen += state->smb1.iov_count - 2;
1977 iov = talloc_zero_array(first_state, struct iovec, iovlen);
1979 return NT_STATUS_NO_MEMORY;
/* Keep a copy of the request list for reply dispatching. */
1982 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1983 first_state, reqs, sizeof(*reqs) * num_reqs);
1984 if (first_state->smb1.chained_requests == NULL) {
1986 return NT_STATUS_NO_MEMORY;
1989 wct_offset = HDR_WCT;
1992 for (i=0; i<num_reqs; i++) {
1993 size_t next_padding = 0;
1996 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* All but the last request must be AndX-capable with >= 2 words. */
1998 if (i < num_reqs-1) {
1999 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
2000 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
2002 TALLOC_FREE(first_state->smb1.chained_requests);
2003 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Advance wct_offset past this request's payload, 4-byte aligned. */
2007 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
2008 state->smb1.iov_count-2) + 1;
2009 if ((wct_offset % 4) != 0) {
2010 next_padding = 4 - (wct_offset % 4);
2012 wct_offset += next_padding;
2013 vwv = state->smb1.vwv;
/* Patch AndX words: next command + its offset, or 0xff to end chain. */
2015 if (i < num_reqs-1) {
2016 struct smbXcli_req_state *next_state =
2017 tevent_req_data(reqs[i+1],
2018 struct smbXcli_req_state);
2019 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
2021 SSVAL(vwv+1, 0, wct_offset);
2022 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
2023 /* properly end the chain */
2024 SCVAL(vwv+0, 0, 0xff);
2025 SCVAL(vwv+0, 1, 0xff);
2031 * The NBT and SMB header
2033 this_iov[0] = state->smb1.iov[0];
2034 this_iov[1] = state->smb1.iov[1];
2038 * This one is a bit subtle. We have to add
2039 * chain_padding bytes between the requests, and we
2040 * have to also include the wct field of the
2041 * subsequent requests. We use the subsequent header
2042 * for the padding, it contains the wct field in its
2045 this_iov[0].iov_len = chain_padding+1;
2046 this_iov[0].iov_base = (void *)&state->smb1.hdr[
2047 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
2048 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
2053 * copy the words and bytes
2055 memcpy(this_iov, state->smb1.iov+2,
2056 sizeof(struct iovec) * (state->smb1.iov_count-2));
2057 this_iov += state->smb1.iov_count - 2;
2058 chain_padding = next_padding;
/* Refuse chains larger than the negotiated max_xmit. */
2061 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
2062 if (nbt_len > first_state->conn->smb1.max_xmit) {
2064 TALLOC_FREE(first_state->smb1.chained_requests);
2065 return NT_STATUS_INVALID_PARAMETER_MIX;
2068 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
2069 if (!NT_STATUS_IS_OK(status)) {
2071 TALLOC_FREE(first_state->smb1.chained_requests);
2075 return NT_STATUS_OK;
/*
 * True while the connection has in-flight work: either requests queued
 * for sending or requests awaiting a reply.
 */
2078 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
2080 return ((tevent_queue_length(conn->outgoing) != 0)
2081 || (talloc_array_length(conn->pending) != 0));
/* Accessor: SMB2 capability bits the server advertised at negprot. */
2084 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
2086 return conn->smb2.server.capabilities;
/* Accessor: SMB2 security mode flags reported by the server. */
2089 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
2091 return conn->smb2.server.security_mode;
/* Accessor: server's maximum SMB2 transaction size. */
2094 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
2096 return conn->smb2.server.max_trans_size;
/* Accessor: server's maximum SMB2 read size. */
2099 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
2101 return conn->smb2.server.max_read_size;
/* Accessor: server's maximum SMB2 write size. */
2104 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
2106 return conn->smb2.server.max_write_size;
/* Set the upper bound of SMB2 credits this client will request. */
2109 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
2110 uint16_t max_credits)
2112 conn->smb2.max_credits = max_credits;
2115 static void smb2cli_req_cancel_done(struct tevent_req *subreq);
/*
 * tevent cancel hook for an in-flight SMB2 request: build and submit an
 * SMB2 CANCEL PDU that mirrors the original request's addressing
 * (flags, pid, tid, message id, and async id when the server went
 * async).  The cancel's own reply is ignored.
 * (Some lines are missing from this extract.)
 */
2117 static bool smb2cli_req_cancel(struct tevent_req *req)
2119 struct smbXcli_req_state *state =
2120 tevent_req_data(req,
2121 struct smbXcli_req_state);
2122 uint32_t flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2123 uint32_t pid = IVAL(state->smb2.hdr, SMB2_HDR_PID);
2124 uint32_t tid = IVAL(state->smb2.hdr, SMB2_HDR_TID);
2125 uint64_t mid = BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID);
2126 uint64_t aid = BVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID);
2127 struct smbXcli_session *session = state->session;
/* CANCEL has a fixed body of 4 bytes; reuse the state's pad buffer. */
2128 uint8_t *fixed = state->smb2.pad;
2129 uint16_t fixed_len = 4;
2130 struct tevent_req *subreq;
2131 struct smbXcli_req_state *substate;
2134 SSVAL(fixed, 0, 0x04);
2137 subreq = smb2cli_req_create(state, state->ev,
2145 if (subreq == NULL) {
2148 substate = tevent_req_data(subreq, struct smbXcli_req_state);
2150 if (flags & SMB2_HDR_FLAG_ASYNC) {
/* Copy the original request's addressing into the cancel header. */
2154 SIVAL(substate->smb2.hdr, SMB2_HDR_FLAGS, flags);
2155 SIVAL(substate->smb2.hdr, SMB2_HDR_PID, pid);
2156 SIVAL(substate->smb2.hdr, SMB2_HDR_TID, tid);
2157 SBVAL(substate->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
2158 SBVAL(substate->smb2.hdr, SMB2_HDR_ASYNC_ID, aid);
2160 status = smb2cli_req_compound_submit(&subreq, 1);
2161 if (!NT_STATUS_IS_OK(status)) {
2162 TALLOC_FREE(subreq);
2166 tevent_req_set_callback(subreq, smb2cli_req_cancel_done, NULL);
/* Completion handler for the CANCEL request: just free the subreq. */
2171 static void smb2cli_req_cancel_done(struct tevent_req *subreq)
2173 /* we do not care about the result */
2174 TALLOC_FREE(subreq);
/*
 * Create (but do not yet submit) an SMB2 request: allocate the request
 * state, fill in the 64-byte SMB2 header (protocol id, opcode, flags,
 * pid/tid/session id) and remember the fixed/dynamic body buffers.
 * Submission happens later via smb2cli_req_compound_submit(), which
 * also assigns the message id and credits.
 * (Some lines are missing from this extract.)
 */
2177 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
2178 struct tevent_context *ev,
2179 struct smbXcli_conn *conn,
2181 uint32_t additional_flags,
2182 uint32_t clear_flags,
2183 uint32_t timeout_msec,
2186 struct smbXcli_session *session,
2187 const uint8_t *fixed,
2192 struct tevent_req *req;
2193 struct smbXcli_req_state *state;
2197 req = tevent_req_create(mem_ctx, &state,
2198 struct smbXcli_req_state);
2205 state->session = session;
2208 uid = session->smb2.session_id;
/* Three iovecs for the reply: header, fixed body, dynamic body. */
2211 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
2212 if (state->smb2.recv_iov == NULL) {
2217 flags |= additional_flags;
2218 flags &= ~clear_flags;
2220 state->smb2.fixed = fixed;
2221 state->smb2.fixed_len = fixed_len;
2222 state->smb2.dyn = dyn;
2223 state->smb2.dyn_len = dyn_len;
/* Fill the static parts of the SMB2 header. */
2225 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2226 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2227 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
2228 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2229 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2230 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2231 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
/* CANCEL never gets a reply, so mark it one-way. */
2234 case SMB2_OP_CANCEL:
2235 state->one_way = true;
2239 * If this is a dummy request, it will have
2240 * UINT64_MAX as message id.
2241 * If we send on break acknowledgement,
2242 * this gets overwritten later.
2244 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
2248 if (timeout_msec > 0) {
2249 struct timeval endtime;
2251 endtime = timeval_current_ofs_msec(timeout_msec);
2252 if (!tevent_req_set_endtime(req, ev, endtime)) {
/*
 * Ask that the request's callback also fire on an async interim
 * response (STATUS_PENDING), not only on final completion.
 */
2260 void smb2cli_req_set_notify_async(struct tevent_req *req)
2262 struct smbXcli_req_state *state =
2263 tevent_req_data(req,
2264 struct smbXcli_req_state);
2266 state->smb2.notify_async = true;
2269 static void smb2cli_req_writev_done(struct tevent_req *subreq);
2270 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2271 TALLOC_CTX *tmp_mem,
/*
 * Submit one or more prepared SMB2 requests as a compound on the wire:
 * assign message ids and credits, 8-byte-pad and link the headers via
 * NextCommand, optionally sign each request with its session key, mark
 * the requests pending, and write everything with a single writev.
 * (Some lines are missing from this extract.)
 */
2274 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2277 struct smbXcli_req_state *state;
2278 struct tevent_req *subreq;
2280 int i, num_iov, nbt_len;
2283 * 1 for the nbt length
2284 * per request: HDR, fixed, dyn, padding
2285 * -1 because the last one does not need padding
2288 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2290 return NT_STATUS_NO_MEMORY;
2296 for (i=0; i<num_reqs; i++) {
2305 bool should_sign = false;
2307 if (!tevent_req_is_in_progress(reqs[i])) {
2308 return NT_STATUS_INTERNAL_ERROR;
2311 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2313 if (!smbXcli_conn_is_connected(state->conn)) {
2314 return NT_STATUS_CONNECTION_DISCONNECTED;
/* Only valid once an SMB2+ dialect was negotiated (or still none). */
2317 if ((state->conn->protocol != PROTOCOL_NONE) &&
2318 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2319 return NT_STATUS_REVISION_MISMATCH;
2322 opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2323 if (opcode == SMB2_OP_CANCEL) {
/* Guard against message-id wraparound. */
2327 avail = UINT64_MAX - state->conn->smb2.mid;
2329 return NT_STATUS_CONNECTION_ABORTED;
/* Large-MTU servers: charge 1 credit per started 64 KiB of payload. */
2332 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2333 charge = (MAX(state->smb2.dyn_len, 1) - 1)/ 65536 + 1;
2338 charge = MAX(state->smb2.credit_charge, charge);
2340 avail = MIN(avail, state->conn->smb2.cur_credits);
2341 if (avail < charge) {
2342 return NT_STATUS_INTERNAL_ERROR;
/* Ask the server to top our credits back up towards max_credits. */
2346 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
2347 credits = state->conn->smb2.max_credits -
2348 state->conn->smb2.cur_credits;
2350 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
/* Consume the charge: advance mid, decrement available credits. */
2354 mid = state->conn->smb2.mid;
2355 state->conn->smb2.mid += charge;
2356 state->conn->smb2.cur_credits -= charge;
2358 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2359 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
2361 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
2362 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
/* Gather header, fixed body and optional dynamic body iovecs. */
2366 iov[num_iov].iov_base = state->smb2.hdr;
2367 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2370 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2371 iov[num_iov].iov_len = state->smb2.fixed_len;
2374 if (state->smb2.dyn != NULL) {
2375 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2376 iov[num_iov].iov_len = state->smb2.dyn_len;
2380 reqlen = sizeof(state->smb2.hdr);
2381 reqlen += state->smb2.fixed_len;
2382 reqlen += state->smb2.dyn_len;
/* All but the last request: pad to 8 bytes and link via NextCommand. */
2384 if (i < num_reqs-1) {
2385 if ((reqlen % 8) > 0) {
2386 uint8_t pad = 8 - (reqlen % 8);
2387 iov[num_iov].iov_base = state->smb2.pad;
2388 iov[num_iov].iov_len = pad;
2392 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2396 if (state->session) {
2397 should_sign = state->session->smb2.should_sign;
2398 if (state->session->smb2.channel_setup) {
2406 status = smb2_signing_sign_pdu(state->session->smb2.signing_key,
2407 &iov[hdr_iov], num_iov - hdr_iov);
2408 if (!NT_STATUS_IS_OK(status)) {
2413 ret = smbXcli_req_set_pending(reqs[i]);
2415 return NT_STATUS_NO_MEMORY;
/* Prefix the whole compound with the 4-byte NBT/TCP length header. */
2419 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2420 _smb_setlen_tcp(state->length_hdr, nbt_len);
2421 iov[0].iov_base = state->length_hdr;
2422 iov[0].iov_len = sizeof(state->length_hdr);
2424 if (state->conn->dispatch_incoming == NULL) {
2425 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2428 subreq = writev_send(state, state->ev, state->conn->outgoing,
2429 state->conn->write_fd, false, iov, num_iov);
2430 if (subreq == NULL) {
2431 return NT_STATUS_NO_MEMORY;
2433 tevent_req_set_callback(subreq, smb2cli_req_writev_done, reqs[0]);
2434 return NT_STATUS_OK;
/*
 * Override the credit charge for a prepared request (used before
 * submit; compound_submit takes the MAX of this and the computed one).
 */
2437 void smb2cli_req_set_credit_charge(struct tevent_req *req, uint16_t charge)
2439 struct smbXcli_req_state *state =
2440 tevent_req_data(req,
2441 struct smbXcli_req_state);
2443 state->smb2.credit_charge = charge;
/*
 * Convenience wrapper: create a single SMB2 request and submit it
 * immediately as a one-element compound.  Errors are reported through
 * the returned tevent_req (posted if they occur synchronously).
 * (Some lines are missing from this extract.)
 */
2446 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2447 struct tevent_context *ev,
2448 struct smbXcli_conn *conn,
2450 uint32_t additional_flags,
2451 uint32_t clear_flags,
2452 uint32_t timeout_msec,
2455 struct smbXcli_session *session,
2456 const uint8_t *fixed,
2461 struct tevent_req *req;
2464 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2465 additional_flags, clear_flags,
2468 fixed, fixed_len, dyn, dyn_len);
/* req_create may already have failed (e.g. timeout setup). */
2472 if (!tevent_req_is_in_progress(req)) {
2473 return tevent_req_post(req, ev);
2475 status = smb2cli_req_compound_submit(&req, 1);
2476 if (tevent_req_nterror(req, status)) {
2477 return tevent_req_post(req, ev);
/*
 * writev completion for an SMB2 (compound) send.  On write failure the
 * whole connection is torn down, which fails all pending requests.
 */
2482 static void smb2cli_req_writev_done(struct tevent_req *subreq)
2484 struct tevent_req *req =
2485 tevent_req_callback_data(subreq,
2487 struct smbXcli_req_state *state =
2488 tevent_req_data(req,
2489 struct smbXcli_req_state);
2493 nwritten = writev_recv(subreq, &err);
2494 TALLOC_FREE(subreq);
2495 if (nwritten == -1) {
2496 /* here, we need to notify all pending requests */
2497 NTSTATUS status = map_nt_error_from_unix_common(err);
2498 smbXcli_conn_disconnect(state->conn, status);
/*
 * Split an incoming (possibly compounded) SMB2 PDU into iovec triples:
 * for each chained response, [hdr, body(fixed), rest(dynamic)].
 * Walks the NextCommand chain, validating magic, header length and
 * body size as it goes; any inconsistency ends in
 * NT_STATUS_INVALID_NETWORK_RESPONSE.
 * (Some lines are missing from this extract.)
 */
2503 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2504 struct iovec **piov, int *pnum_iov)
2514 iov = talloc_array(mem_ctx, struct iovec, num_iov);
2516 return NT_STATUS_NO_MEMORY;
2519 buflen = smb_len_tcp(buf);
2521 first_hdr = buf + NBT_HDR_SIZE;
2523 while (taken < buflen) {
2524 size_t len = buflen - taken;
2525 uint8_t *hdr = first_hdr + taken;
2528 size_t next_command_ofs;
2530 struct iovec *iov_tmp;
2533 * We need the header plus the body length field
2536 if (len < SMB2_HDR_BODY + 2) {
2537 DEBUG(10, ("%d bytes left, expected at least %d\n",
2538 (int)len, SMB2_HDR_BODY));
2541 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2542 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2546 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2547 DEBUG(10, ("Got HDR len %d, expected %d\n",
2548 SVAL(hdr, 4), SMB2_HDR_BODY));
2553 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2554 body_size = SVAL(hdr, SMB2_HDR_BODY);
/* A non-zero NextCommand bounds this response's full size. */
2556 if (next_command_ofs != 0) {
2557 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2560 if (next_command_ofs > full_size) {
2563 full_size = next_command_ofs;
2565 if (body_size < 2) {
/* The low bit of the body-size field is a parity/odd marker; mask it. */
2568 body_size &= 0xfffe;
2570 if (body_size > (full_size - SMB2_HDR_BODY)) {
/* Grow the iovec array by 3 entries for this response. */
2574 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2576 if (iov_tmp == NULL) {
2578 return NT_STATUS_NO_MEMORY;
2581 cur = &iov[num_iov];
2584 cur[0].iov_base = hdr;
2585 cur[0].iov_len = SMB2_HDR_BODY;
2586 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2587 cur[1].iov_len = body_size;
2588 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2589 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2595 *pnum_iov = num_iov;
2596 return NT_STATUS_OK;
/* shared error exit for all validation failures above */
2600 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Find the pending request whose SMB2 header carries the given message
 * id; linear scan over conn->pending.  (Return statements are outside
 * this extract; presumably NULL when no match — confirm in full file.)
 */
2603 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2606 size_t num_pending = talloc_array_length(conn->pending);
2609 for (i=0; i<num_pending; i++) {
2610 struct tevent_req *req = conn->pending[i];
2611 struct smbXcli_req_state *state =
2612 tevent_req_data(req,
2613 struct smbXcli_req_state);
2615 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * Dispatch an incoming SMB2 PDU: split the compound, and for each
 * response credit-account, match it to a pending request by message id,
 * handle async interim (STATUS_PENDING) responses, verify signing where
 * required, and complete the matching request.  Returns NT_STATUS_RETRY
 * while responses remain deferred, NT_STATUS_OK otherwise.
 * (Some lines are missing from this extract.)
 */
2622 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2623 TALLOC_CTX *tmp_mem,
2626 struct tevent_req *req;
2627 struct smbXcli_req_state *state = NULL;
2632 struct smbXcli_session *last_session = NULL;
2634 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2636 if (!NT_STATUS_IS_OK(status)) {
/* One iovec triple (hdr/body/dyn) per chained response. */
2640 for (i=0; i<num_iov; i+=3) {
2641 uint8_t *inbuf_ref = NULL;
2642 struct iovec *cur = &iov[i];
2643 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2644 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2645 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2646 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2647 uint16_t req_opcode;
2649 uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT);
2650 uint32_t new_credits;
2651 struct smbXcli_session *session = NULL;
2652 const DATA_BLOB *signing_key = NULL;
2653 bool should_sign = false;
/* Account the credits granted; cap the running total at UINT16_MAX. */
2655 new_credits = conn->smb2.cur_credits;
2656 new_credits += credits;
2657 if (new_credits > UINT16_MAX) {
2658 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2660 conn->smb2.cur_credits += credits;
2662 req = smb2cli_conn_find_pending(conn, mid);
2664 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2666 state = tevent_req_data(req, struct smbXcli_req_state);
2668 state->smb2.got_async = false;
/* The response opcode must match what we sent. */
2670 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2671 if (opcode != req_opcode) {
2672 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2674 req_flags = SVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2676 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2677 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Async interim response: remember the async id and, if requested,
 * notify the caller without completing the request. */
2680 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
2681 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2682 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2683 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2686 * async interim responses are not signed,
2687 * even if the SMB2_HDR_FLAG_SIGNED flag
2690 req_flags |= SMB2_HDR_FLAG_ASYNC;
2691 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2692 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2694 if (state->smb2.notify_async) {
2695 state->smb2.got_async = true;
2696 tevent_req_defer_callback(req, state->ev);
2697 tevent_req_notify_callback(req);
/* Chained responses inherit the session of the previous one. */
2702 session = state->session;
2703 if (req_flags & SMB2_HDR_FLAG_CHAINED) {
2704 session = last_session;
2706 last_session = session;
2709 should_sign = session->smb2.should_sign;
2710 if (session->smb2.channel_setup) {
/* If we require signing, an unsigned response is rejected. */
2716 if (!(flags & SMB2_HDR_FLAG_SIGNED)) {
2717 return NT_STATUS_ACCESS_DENIED;
2721 if (flags & SMB2_HDR_FLAG_SIGNED) {
2722 uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID);
/* Signed response without a known session: look it up by uid. */
2724 if (session == NULL) {
2725 struct smbXcli_session *s;
2727 s = state->conn->sessions;
2728 for (; s; s = s->next) {
2729 if (s->smb2.session_id != uid) {
2738 if (session == NULL) {
2739 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2742 last_session = session;
2743 signing_key = &session->smb2.signing_key;
2746 if ((opcode == SMB2_OP_SESSSETUP) &&
2747 NT_STATUS_IS_OK(status)) {
2749 * the caller has to check the signing
2750 * as only the caller knows the correct
2756 if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
2758 * if the server returns NT_STATUS_USER_SESSION_DELETED
2759 * the response is not signed and we should
2760 * propagate the NT_STATUS_USER_SESSION_DELETED
2761 * status to the caller.
2768 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_NAME_DELETED) ||
2769 NT_STATUS_EQUAL(status, NT_STATUS_FILE_CLOSED) ||
2770 NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) {
2772 * if the server returns
2773 * NT_STATUS_NETWORK_NAME_DELETED
2774 * NT_STATUS_FILE_CLOSED
2775 * NT_STATUS_INVALID_PARAMETER
2776 * the response might not be signed
2777 * as this happens before the signing checks.
2779 * If server echos the signature (or all zeros)
2780 * we should report the status from the server
/* Server echoed our signature back: accept, note skipped signing. */
2786 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2787 state->smb2.hdr+SMB2_HDR_SIGNATURE,
2790 state->smb2.signing_skipped = true;
2796 static const uint8_t zeros[16];
2798 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2802 state->smb2.signing_skipped = true;
2809 status = smb2_signing_check_pdu(*signing_key, cur, 3);
2810 if (!NT_STATUS_IS_OK(status)) {
2812 * If the signing check fails, we disconnect
2819 smbXcli_req_unset_pending(req);
2822 * There might be more than one response
2823 * we need to defer the notifications
2825 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
2830 tevent_req_defer_callback(req, state->ev);
2834 * Note: here we use talloc_reference() in a way
2835 * that does not expose it to the caller.
2837 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2838 if (tevent_req_nomem(inbuf_ref, req)) {
2842 /* copy the related buffers */
2843 state->smb2.recv_iov[0] = cur[0];
2844 state->smb2.recv_iov[1] = cur[1];
2845 state->smb2.recv_iov[2] = cur[2];
2847 tevent_req_done(req);
2851 return NT_STATUS_RETRY;
2854 return NT_STATUS_OK;
/*
 * Collect the result of a completed SMB2 request.
 *
 * Returns STATUS_PENDING if only an async interim response arrived so
 * far.  Otherwise validates the received (status, body_size) pair
 * against the caller's expectation table (body_size 0 = any size), and
 * refuses responses whose signing was skipped unless the caller
 * explicitly expected an error status.  On success the three reply
 * iovecs move to mem_ctx.  (Some lines are missing from this extract.)
 */
2857 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2858 struct iovec **piov,
2859 const struct smb2cli_req_expected_response *expected,
2860 size_t num_expected)
2862 struct smbXcli_req_state *state =
2863 tevent_req_data(req,
2864 struct smbXcli_req_state);
2867 bool found_status = false;
2868 bool found_size = false;
2875 if (state->smb2.got_async) {
2876 return STATUS_PENDING;
/* Transport-level error: only accept it if the caller listed it. */
2879 if (tevent_req_is_nterror(req, &status)) {
2880 for (i=0; i < num_expected; i++) {
2881 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2882 found_status = true;
2888 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2894 if (num_expected == 0) {
2895 found_status = true;
2899 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2900 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2902 for (i=0; i < num_expected; i++) {
2903 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2907 found_status = true;
2908 if (expected[i].body_size == 0) {
2913 if (expected[i].body_size == body_size) {
2919 if (!found_status) {
/* Unsigned-but-should-be-signed responses only pass through as
 * plain error statuses with no expectations attached. */
2923 if (state->smb2.signing_skipped) {
2924 if (num_expected > 0) {
2925 return NT_STATUS_ACCESS_DENIED;
2927 if (!NT_STATUS_IS_ERR(status)) {
2928 return NT_STATUS_ACCESS_DENIED;
2933 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2937 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
/*
 * SMB1 negprot dialect table, ordered from oldest to newest; the
 * server's reply index selects the highest dialect both sides support.
 * The "SMB 2.???"/"SMB 2.002" strings allow SMB2 negotiation to start
 * from an SMB1 negprot.
 */
2943 static const struct {
2944 enum protocol_types proto;
2945 const char *smb1_name;
2946 } smb1cli_prots[] = {
2947 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
2948 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
2949 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
2950 {PROTOCOL_LANMAN1, "LANMAN1.0"},
2951 {PROTOCOL_LANMAN2, "LM1.2X002"},
2952 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
2953 {PROTOCOL_LANMAN2, "LANMAN2.1"},
2954 {PROTOCOL_LANMAN2, "Samba"},
2955 {PROTOCOL_NT1, "NT LANMAN 1.0"},
2956 {PROTOCOL_NT1, "NT LM 0.12"},
2957 {PROTOCOL_SMB2_02, "SMB 2.002"},
2958 {PROTOCOL_SMB2_10, "SMB 2.???"},
/* Mapping of internal protocol levels to SMB2 dialect revision codes
 * sent in the SMB2 NEGOTIATE request. */
2961 static const struct {
2962 enum protocol_types proto;
2963 uint16_t smb2_dialect;
2964 } smb2cli_prots[] = {
2965 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
2966 {PROTOCOL_SMB2_10, SMB2_DIALECT_REVISION_210},
2967 {PROTOCOL_SMB2_22, SMB2_DIALECT_REVISION_222},
/*
 * Per-negotiation state for smbXcli_negprot_send(): connection, event
 * context, timeout and the allowed protocol range, plus a buffer large
 * enough for one 16-bit dialect code per smb2cli_prots entry.
 */
2970 struct smbXcli_negprot_state {
2971 struct smbXcli_conn *conn;
2972 struct tevent_context *ev;
2973 uint32_t timeout_msec;
2974 enum protocol_types min_protocol;
2975 enum protocol_types max_protocol;
2979 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
2983 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
2984 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
2985 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
2986 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
2987 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
2988 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
/*
 * Start protocol negotiation within [min_protocol, max_protocol].
 *
 * Three cases: pure SMB1 range -> SMB1 negprot; pure SMB2+ range ->
 * SMB2 negotiate; a range that straddles both -> an SMB1 negprot
 * carrying the SMB2 dialect strings, with a dispatcher that adapts to
 * whichever protocol the server answers with.
 * (Some lines are missing from this extract.)
 */
2992 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
2993 struct tevent_context *ev,
2994 struct smbXcli_conn *conn,
2995 uint32_t timeout_msec,
2996 enum protocol_types min_protocol,
2997 enum protocol_types max_protocol)
2999 struct tevent_req *req, *subreq;
3000 struct smbXcli_negprot_state *state;
3002 req = tevent_req_create(mem_ctx, &state,
3003 struct smbXcli_negprot_state);
3009 state->timeout_msec = timeout_msec;
3010 state->min_protocol = min_protocol;
3011 state->max_protocol = max_protocol;
/* Both bounds must be set and ordered. */
3013 if (min_protocol == PROTOCOL_NONE) {
3014 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3015 return tevent_req_post(req, ev);
3018 if (max_protocol == PROTOCOL_NONE) {
3019 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3020 return tevent_req_post(req, ev);
3023 if (min_protocol > max_protocol) {
3024 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3025 return tevent_req_post(req, ev);
/* SMB1-only range. */
3028 if ((min_protocol < PROTOCOL_SMB2_02) &&
3029 (max_protocol < PROTOCOL_SMB2_02)) {
3033 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3035 subreq = smbXcli_negprot_smb1_subreq(state);
3036 if (tevent_req_nomem(subreq, req)) {
3037 return tevent_req_post(req, ev);
3039 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
/* SMB2-only range. */
3043 if ((min_protocol >= PROTOCOL_SMB2_02) &&
3044 (max_protocol >= PROTOCOL_SMB2_02)) {
3048 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3050 subreq = smbXcli_negprot_smb2_subreq(state);
3051 if (tevent_req_nomem(subreq, req)) {
3052 return tevent_req_post(req, ev);
3054 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3059 * We send an SMB1 negprot with the SMB2 dialects
3060 * and expect a SMB1 or a SMB2 response.
3062 * smbXcli_negprot_dispatch_incoming() will fix the
3063 * callback to match protocol of the response.
3065 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
3067 subreq = smbXcli_negprot_smb1_subreq(state);
3068 if (tevent_req_nomem(subreq, req)) {
3069 return tevent_req_post(req, ev);
3071 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
/*
 * Callback for the mixed SMB1/SMB2 negprot path.  It should never run
 * to completion: the dispatcher replaces the callback once the server's
 * protocol is known, so reaching the end is an internal error; only a
 * low-level transport error is propagated.
 */
3075 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
3077 struct tevent_req *req =
3078 tevent_req_callback_data(subreq,
3083 * we just want the low level error
3085 status = tevent_req_simple_recv_ntstatus(subreq);
3086 TALLOC_FREE(subreq);
3087 if (tevent_req_nterror(req, status)) {
3091 /* this should never happen */
3092 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
/*
 * Build and send the SMB1 NEGPROT request: a BUFFERTYPE-prefixed,
 * NUL-terminated dialect string for every smb1cli_prots entry within
 * the requested [min, max] protocol range.
 * (Some lines are missing from this extract.)
 */
3095 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
3098 DATA_BLOB bytes = data_blob_null;
3102 /* setup the protocol strings */
3103 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3107 if (smb1cli_prots[i].proto < state->min_protocol) {
3111 if (smb1cli_prots[i].proto > state->max_protocol) {
3115 ok = data_blob_append(state, &bytes, &c, sizeof(c));
3121 * We now it is already ascii and
3122 * we want NULL termination.
3124 ok = data_blob_append(state, &bytes,
3125 smb1cli_prots[i].smb1_name,
3126 strlen(smb1cli_prots[i].smb1_name)+1);
3132 smb1cli_req_flags(state->max_protocol,
3133 state->conn->smb1.client.capabilities,
3138 return smb1cli_req_send(state, state->ev, state->conn,
3142 state->timeout_msec,
3143 0xFFFE, 0, 0, /* pid, tid, uid */
3144 0, NULL, /* wct, vwv */
3145 bytes.length, bytes.data);
/*
 * Completion handler for the SMB1 NEGPROT subrequest.
 *
 * Parses the server's negotiate response, maps the returned dialect
 * index back to an enum protocol_types, extracts the server parameters
 * in one of three layouts (wct 0x11 = NT1, wct 0x0D = LANMAN, wct 0x01
 * = CORE), negotiates SMB1 signing, merges client/server capabilities,
 * and stores everything on conn->smb1.*. Any parse failure completes
 * the request with NT_STATUS_INVALID_NETWORK_RESPONSE.
 */
3148 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
3150 struct tevent_req *req =
3151 tevent_req_callback_data(subreq,
3153 struct smbXcli_negprot_state *state =
3154 tevent_req_data(req,
3155 struct smbXcli_negprot_state);
3156 struct smbXcli_conn *conn = state->conn;
3157 struct iovec *recv_iov = NULL;
3166 size_t num_prots = 0;
3168 uint32_t client_capabilities = conn->smb1.client.capabilities;
3169 uint32_t both_capabilities;
3170 uint32_t server_capabilities = 0;
3171 uint32_t capabilities;
3172 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
3173 uint32_t server_max_xmit = 0;
3175 uint32_t server_max_mux = 0;
3176 uint16_t server_security_mode = 0;
3177 uint32_t server_session_key = 0;
3178 bool server_readbraw = false;
3179 bool server_writebraw = false;
3180 bool server_lockread = false;
3181 bool server_writeunlock = false;
3182 struct GUID server_guid = GUID_zero();
3183 DATA_BLOB server_gss_blob = data_blob_null;
3184 uint8_t server_challenge[8];
3185 char *server_workgroup = NULL;
3186 char *server_name = NULL;
3187 int server_time_zone = 0;
3188 NTTIME server_system_time = 0;
/* the three acceptable response shapes, keyed by word count (wct) */
3189 static const struct smb1cli_req_expected_response expected[] = {
3191 .status = NT_STATUS_OK,
3192 .wct = 0x11, /* NT1 */
3195 .status = NT_STATUS_OK,
3196 .wct = 0x0D, /* LM */
3199 .status = NT_STATUS_OK,
3200 .wct = 0x01, /* CORE */
3204 ZERO_STRUCT(server_challenge);
3206 status = smb1cli_req_recv(subreq, state,
3211 NULL, /* pvwv_offset */
3214 NULL, /* pbytes_offset */
3216 expected, ARRAY_SIZE(expected));
3217 TALLOC_FREE(subreq);
3218 if (tevent_req_nterror(req, status)) {
3222 flags = CVAL(inhdr, HDR_FLG);
/* vwv[0] is the index of the dialect the server selected */
3224 protnum = SVAL(vwv, 0);
/*
 * Re-walk the dialect table in the same order the request was built
 * so the index maps back to the right protocol level.
 */
3226 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3227 if (smb1cli_prots[i].proto < state->min_protocol) {
3231 if (smb1cli_prots[i].proto > state->max_protocol) {
3235 if (protnum != num_prots) {
3240 conn->protocol = smb1cli_prots[i].proto;
3244 if (conn->protocol == PROTOCOL_NONE) {
3245 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* pre-NT1 dialects cannot sign; refuse if signing is mandatory */
3249 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
3250 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
3251 "and the selected protocol level doesn't support it.\n"));
3252 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3256 if (flags & FLAG_SUPPORT_LOCKREAD) {
3257 server_lockread = true;
3258 server_writeunlock = true;
/* --- NT1 response layout (wct 0x11) --- */
3261 if (conn->protocol >= PROTOCOL_NT1) {
3262 const char *client_signing = NULL;
3263 bool server_mandatory = false;
3264 bool server_allowed = false;
3265 const char *server_signing = NULL;
3270 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* NT1 fields are byte-offset within the vwv array, hence the +1 offsets */
3275 server_security_mode = CVAL(vwv + 1, 0);
3276 server_max_mux = SVAL(vwv + 1, 1);
3277 server_max_xmit = IVAL(vwv + 3, 1);
3278 server_session_key = IVAL(vwv + 7, 1);
3279 server_time_zone = SVALS(vwv + 15, 1);
3280 server_time_zone *= 60;
3281 /* this time arrives in real GMT */
3282 server_system_time = BVAL(vwv + 11, 1);
3283 server_capabilities = IVAL(vwv + 9, 1);
3285 key_len = CVAL(vwv + 16, 1);
3287 if (server_capabilities & CAP_RAW_MODE) {
3288 server_readbraw = true;
3289 server_writebraw = true;
3291 if (server_capabilities & CAP_LOCK_AND_READ) {
3292 server_lockread = true;
/* extended security: bytes = 16-byte server GUID + GSS negotiate blob */
3295 if (server_capabilities & CAP_EXTENDED_SECURITY) {
3296 DATA_BLOB blob1, blob2;
3298 if (num_bytes < 16) {
3299 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3303 blob1 = data_blob_const(bytes, 16);
3304 status = GUID_from_data_blob(&blob1, &server_guid);
3305 if (tevent_req_nterror(req, status)) {
3309 blob1 = data_blob_const(bytes+16, num_bytes-16);
3310 blob2 = data_blob_dup_talloc(state, blob1);
3311 if (blob1.length > 0 &&
3312 tevent_req_nomem(blob2.data, req)) {
3315 server_gss_blob = blob2;
/* non-extended security: bytes = challenge + workgroup + server name */
3317 DATA_BLOB blob1, blob2;
3319 if (num_bytes < key_len) {
3320 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* the NTLM challenge is either absent or exactly 8 bytes */
3324 if (key_len != 0 && key_len != 8) {
3325 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3330 memcpy(server_challenge, bytes, 8);
3333 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3334 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
/* first UTF-16 string after the challenge: workgroup name */
3335 if (blob1.length > 0) {
3338 len = utf16_len_n(blob1.data,
3342 ok = convert_string_talloc(state,
3350 status = map_nt_error_from_unix_common(errno);
3351 tevent_req_nterror(req, status);
3356 blob2.data += blob1.length;
3357 blob2.length -= blob1.length;
/* second UTF-16 string, if present: server name */
3358 if (blob2.length > 0) {
3361 len = utf16_len_n(blob1.data,
3365 ok = convert_string_talloc(state,
3373 status = map_nt_error_from_unix_common(errno);
3374 tevent_req_nterror(req, status);
/* negotiate signing: compare client policy with server security mode */
3380 client_signing = "disabled";
3381 if (conn->allow_signing) {
3382 client_signing = "allowed";
3384 if (conn->mandatory_signing) {
3385 client_signing = "required";
3388 server_signing = "not supported";
3389 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
3390 server_signing = "supported";
3391 server_allowed = true;
3393 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
3394 server_signing = "required";
3395 server_mandatory = true;
3398 ok = smb_signing_set_negotiated(conn->smb1.signing,
3402 DEBUG(1,("cli_negprot: SMB signing is required, "
3403 "but client[%s] and server[%s] mismatch\n",
3404 client_signing, server_signing));
3405 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
/* --- LANMAN response layout (wct 0x0D) --- */
3409 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
3415 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3419 server_security_mode = SVAL(vwv + 1, 0);
3420 server_max_xmit = SVAL(vwv + 2, 0);
3421 server_max_mux = SVAL(vwv + 3, 0);
3422 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
3423 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
3424 server_session_key = IVAL(vwv + 6, 0);
3425 server_time_zone = SVALS(vwv + 10, 0);
3426 server_time_zone *= 60;
3427 /* this time is converted to GMT by make_unix_date */
3428 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
3429 unix_to_nt_time(&server_system_time, t);
3430 key_len = SVAL(vwv + 11, 0);
3432 if (num_bytes < key_len) {
3433 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3437 if (key_len != 0 && key_len != 8) {
3438 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3443 memcpy(server_challenge, bytes, 8);
3446 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3447 if (blob1.length > 0) {
3451 len = utf16_len_n(blob1.data,
3455 ok = convert_string_talloc(state,
3463 status = map_nt_error_from_unix_common(errno);
3464 tevent_req_nterror(req, status);
/* --- CORE response (wct 0x01): fixed defaults, local time zone --- */
3470 /* the old core protocol */
3471 server_time_zone = get_time_zone(time(NULL));
3472 server_max_xmit = 1024;
/* sanity-check server limits before committing them to conn */
3476 if (server_max_xmit < 1024) {
3477 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3481 if (server_max_mux < 1) {
3482 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3487 * Now calculate the negotiated capabilities
3488 * based on the mask for:
3489 * - client only flags
3490 * - flags used in both directions
3491 * - server only flags
3493 both_capabilities = client_capabilities & server_capabilities;
3494 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3495 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3496 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3498 max_xmit = MIN(client_max_xmit, server_max_xmit);
/* persist the negotiated state on the connection */
3500 conn->smb1.server.capabilities = server_capabilities;
3501 conn->smb1.capabilities = capabilities;
3503 conn->smb1.server.max_xmit = server_max_xmit;
3504 conn->smb1.max_xmit = max_xmit;
3506 conn->smb1.server.max_mux = server_max_mux;
3508 conn->smb1.server.security_mode = server_security_mode;
3510 conn->smb1.server.readbraw = server_readbraw;
3511 conn->smb1.server.writebraw = server_writebraw;
3512 conn->smb1.server.lockread = server_lockread;
3513 conn->smb1.server.writeunlock = server_writeunlock;
3515 conn->smb1.server.session_key = server_session_key;
/* move ownership of the gss blob / strings from state to conn */
3517 talloc_steal(conn, server_gss_blob.data);
3518 conn->smb1.server.gss_blob = server_gss_blob;
3519 conn->smb1.server.guid = server_guid;
3520 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3521 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3522 conn->smb1.server.name = talloc_move(conn, &server_name);
3524 conn->smb1.server.time_zone = server_time_zone;
3525 conn->smb1.server.system_time = server_system_time;
3527 tevent_req_done(req);
/*
 * Build and send the SMB2 NEGOTIATE request: fill the dynamic buffer
 * with one 16-bit dialect code per protocol level in range, then fill
 * the fixed 36-byte body (dialect count, security mode, client GUID
 * when SMB 2.1+ is requested) and hand both to smb2cli_req_send().
 */
3530 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3534 uint16_t dialect_count = 0;
3536 buf = state->smb2.dyn;
3537 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
/* skip dialects outside the caller-requested protocol range */
3538 if (smb2cli_prots[i].proto < state->min_protocol) {
3542 if (smb2cli_prots[i].proto > state->max_protocol) {
3546 SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
3550 buf = state->smb2.fixed;
3552 SSVAL(buf, 2, dialect_count);
3553 SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3554 SSVAL(buf, 6, 0); /* Reserved */
3555 SSVAL(buf, 8, 0); /* Capabilities */
/* a client GUID is only meaningful from SMB 2.1 on; zero it otherwise */
3556 if (state->max_protocol >= PROTOCOL_SMB2_10) {
3560 status = GUID_to_ndr_blob(&state->conn->smb2.client.guid,
3562 if (!NT_STATUS_IS_OK(status)) {
3565 memcpy(buf+12, blob.data, 16); /* ClientGuid */
3567 memset(buf+12, 0, 16); /* ClientGuid */
3569 SBVAL(buf, 28, 0); /* ClientStartTime */
3571 return smb2cli_req_send(state, state->ev,
3572 state->conn, SMB2_OP_NEGPROT,
3574 state->timeout_msec,
3575 0xFEFF, 0, NULL, /* pid, tid, session */
3576 state->smb2.fixed, sizeof(state->smb2.fixed),
3577 state->smb2.dyn, dialect_count*2);
/*
 * Completion handler for the SMB2 NEGOTIATE subrequest.
 *
 * Maps the server's DialectRevision back to an enum protocol_types,
 * handles the SMB 2.??? (0x02FF) multi-protocol wildcard by re-sending
 * a pure SMB2 negprot, validates the security buffer bounds, and
 * stores the negotiated server parameters on conn->smb2.server.
 */
3580 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3582 struct tevent_req *req =
3583 tevent_req_callback_data(subreq,
3585 struct smbXcli_negprot_state *state =
3586 tevent_req_data(req,
3587 struct smbXcli_negprot_state);
3588 struct smbXcli_conn *conn = state->conn;
3589 size_t security_offset, security_length;
3595 uint16_t dialect_revision;
3596 static const struct smb2cli_req_expected_response expected[] = {
3598 .status = NT_STATUS_OK,
3603 status = smb2cli_req_recv(subreq, state, &iov,
3604 expected, ARRAY_SIZE(expected));
3605 TALLOC_FREE(subreq);
3606 if (tevent_req_nterror(req, status)) {
3610 body = (uint8_t *)iov[1].iov_base;
3612 dialect_revision = SVAL(body, 4);
/* find the protocol level matching the server's chosen dialect */
3614 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3615 if (smb2cli_prots[i].proto < state->min_protocol) {
3619 if (smb2cli_prots[i].proto > state->max_protocol) {
3623 if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3627 conn->protocol = smb2cli_prots[i].proto;
3631 if (conn->protocol == PROTOCOL_NONE) {
3632 if (state->min_protocol >= PROTOCOL_SMB2_02) {
3633 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* only the 0x02FF wildcard answer justifies a retry */
3637 if (dialect_revision != SMB2_DIALECT_REVISION_2FF) {
3638 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3642 /* make sure we do not loop forever */
3643 state->min_protocol = PROTOCOL_SMB2_02;
3646 * send a SMB2 negprot, in order to negotiate
3647 * the SMB2 dialect. This needs to use the
3650 state->conn->smb2.mid = 1;
3651 subreq = smbXcli_negprot_smb2_subreq(state);
3652 if (tevent_req_nomem(subreq, req)) {
3655 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
/* parse and persist the fixed part of the NEGOTIATE response body */
3659 conn->smb2.server.security_mode = SVAL(body, 2);
3661 blob = data_blob_const(body + 8, 16);
3662 status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3663 if (tevent_req_nterror(req, status)) {
3667 conn->smb2.server.capabilities = IVAL(body, 24);
3668 conn->smb2.server.max_trans_size= IVAL(body, 28);
3669 conn->smb2.server.max_read_size = IVAL(body, 32);
3670 conn->smb2.server.max_write_size= IVAL(body, 36);
3671 conn->smb2.server.system_time = BVAL(body, 40);
3672 conn->smb2.server.start_time = BVAL(body, 48);
3674 security_offset = SVAL(body, 56);
3675 security_length = SVAL(body, 58);
/* the security buffer must directly follow the fixed body */
3677 if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3678 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3682 if (security_length > iov[2].iov_len) {
3683 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3687 conn->smb2.server.gss_blob = data_blob_talloc(conn,
3690 if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3694 tevent_req_done(req);
/*
 * Dispatch function installed for the very first (negprot) PDU only.
 *
 * Looks at the protocol magic of the incoming buffer to decide whether
 * the server answered with SMB1 or SMB2, wires the matching *_done
 * callback onto the single pending subrequest, installs the permanent
 * per-protocol dispatcher on the connection and forwards the PDU to it.
 * Anything other than exactly one pending request is a programming
 * error; a non-SMB magic disconnects with INVALID_NETWORK_RESPONSE.
 */
3697 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3698 TALLOC_CTX *tmp_mem,
3701 size_t num_pending = talloc_array_length(conn->pending);
3702 struct tevent_req *subreq;
3703 struct smbXcli_req_state *substate;
3704 struct tevent_req *req;
/* bytes 4..7 of the wire buffer hold the SMB1/SMB2 protocol magic */
3705 uint32_t protocol_magic = IVAL(inbuf, 4);
3707 if (num_pending != 1) {
3708 return NT_STATUS_INTERNAL_ERROR;
3711 subreq = conn->pending[0];
3712 substate = tevent_req_data(subreq, struct smbXcli_req_state);
3713 req = tevent_req_callback_data(subreq, struct tevent_req);
3715 switch (protocol_magic) {
/* SMB1 reply: continue negprot on the SMB1 path */
3717 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3718 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3719 return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/* SMB2 reply (possibly to an SMB1-style negprot request) */
3722 if (substate->smb2.recv_iov == NULL) {
3724 * For the SMB1 negprot we have move it.
3726 substate->smb2.recv_iov = substate->smb1.recv_iov;
3727 substate->smb1.recv_iov = NULL;
3730 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3731 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3732 return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3735 DEBUG(10, ("Got non-SMB PDU\n"));
3736 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Receive the result of smbXcli_negprot_send(): returns the final
 * NTSTATUS of the negotiation (the negotiated state lives on the conn).
 */
3739 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
3741 return tevent_req_simple_recv_ntstatus(req);
/*
 * Synchronous wrapper around smbXcli_negprot_send()/_recv(): spins a
 * private tevent context on a talloc stackframe until the negotiation
 * completes. Must not be called while async requests are in flight on
 * the connection (would corrupt the pending queue).
 *
 * @param timeout_msec  per-request timeout in milliseconds
 * @param min_protocol  lowest acceptable dialect
 * @param max_protocol  highest dialect to offer
 * @return NT_STATUS_OK on success, NT_STATUS_INVALID_PARAMETER_MIX if
 *         async calls are pending, or the negotiation's failure status.
 */
3744 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
3745 uint32_t timeout_msec,
3746 enum protocol_types min_protocol,
3747 enum protocol_types max_protocol)
3749 TALLOC_CTX *frame = talloc_stackframe();
3750 struct tevent_context *ev;
3751 struct tevent_req *req;
3752 NTSTATUS status = NT_STATUS_NO_MEMORY;
3755 if (smbXcli_conn_has_async_calls(conn)) {
3757 * Can't use sync call while an async call is in flight
3759 status = NT_STATUS_INVALID_PARAMETER_MIX;
3762 ev = tevent_context_init(frame);
3766 req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
3767 min_protocol, max_protocol);
3771 ok = tevent_req_poll(req, ev);
/* tevent_req_poll() failed: translate errno into an NTSTATUS */
3773 status = map_nt_error_from_unix_common(errno);
3776 status = smbXcli_negprot_recv(req);
/*
 * talloc destructor for a session: unlink it from its connection's
 * session list (no-op when the session was never attached to a conn).
 */
3782 static int smbXcli_session_destructor(struct smbXcli_session *session)
3784 if (session->conn == NULL) {
3788 DLIST_REMOVE(session->conn->sessions, session);
/*
 * Allocate a zero-initialized session on mem_ctx, attach it to the
 * end of conn's session list and install the destructor that unlinks
 * it again on free. Returns NULL on allocation failure.
 */
3792 struct smbXcli_session *smbXcli_session_create(TALLOC_CTX *mem_ctx,
3793 struct smbXcli_conn *conn)
3795 struct smbXcli_session *session;
3797 session = talloc_zero(mem_ctx, struct smbXcli_session);
3798 if (session == NULL) {
3801 talloc_set_destructor(session, smbXcli_session_destructor);
3803 DLIST_ADD_END(conn->sessions, session, struct smbXcli_session *);
3804 session->conn = conn;
/*
 * Return the SMB2 SecurityMode flags to advertise in a session setup:
 * signing enabled always (once past the elided guard), plus signing
 * required when the connection mandates signing. Returns 0 on the
 * early-out path whose condition is elided here.
 */
3809 uint8_t smb2cli_session_security_mode(struct smbXcli_session *session)
3811 struct smbXcli_conn *conn = session->conn;
3812 uint8_t security_mode = 0;
3815 return security_mode;
3818 security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
3819 if (conn->mandatory_signing) {
3820 security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
3823 return security_mode;
/* Accessor: the 64-bit SMB2 session id currently stored on the session. */
3826 uint64_t smb2cli_session_current_id(struct smbXcli_session *session)
3828 return session->smb2.session_id;
/*
 * Store the session id and session flags returned by the server's
 * SESSION_SETUP response on the session object.
 */
3831 void smb2cli_session_set_id_and_flags(struct smbXcli_session *session,
3832 uint64_t session_id,
3833 uint16_t session_flags)
3835 session->smb2.session_id = session_id;
3836 session->smb2.session_flags = session_flags;
/*
 * Finalize the session's SMB2 signing state after authentication.
 *
 * Guest/anonymous sessions never sign. Otherwise the final session
 * setup response (recv_iov) is verified against the signing key (the
 * fresh session_key during channel setup, the stored one otherwise),
 * the session key and signing key are duplicated onto the session,
 * and should_sign is derived from the client's desire and the server's
 * advertised SMB2_NEGOTIATE_SIGNING_REQUIRED security mode.
 *
 * @return NT_STATUS_OK, NT_STATUS_INVALID_PARAMETER_MIX on the elided
 *         precondition, NT_STATUS_NO_MEMORY, or the verification error.
 */
3839 NTSTATUS smb2cli_session_update_session_key(struct smbXcli_session *session,
3840 const DATA_BLOB session_key,
3841 const struct iovec *recv_iov)
3843 struct smbXcli_conn *conn = session->conn;
3844 uint16_t no_sign_flags;
3845 DATA_BLOB signing_key;
3849 return NT_STATUS_INVALID_PARAMETER_MIX;
/* guest and anonymous (NULL) sessions cannot sign */
3852 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
3854 if (session->smb2.session_flags & no_sign_flags) {
3855 session->smb2.should_sign = false;
3856 return NT_STATUS_OK;
/* prefer an already-established signing key over the new session key */
3859 if (session->smb2.signing_key.length > 0) {
3860 signing_key = session->smb2.signing_key;
3862 signing_key = session_key;
/* during channel setup, verification uses the channel's own key */
3864 if (session->smb2.channel_setup) {
3865 signing_key = session_key;
/* verify the signature of the final session setup response (3 iovecs) */
3868 status = smb2_signing_check_pdu(signing_key, recv_iov, 3);
3869 if (!NT_STATUS_IS_OK(status)) {
3873 if (!session->smb2.channel_setup) {
3874 session->smb2.session_key = data_blob_dup_talloc(session,
3876 if (session->smb2.session_key.data == NULL) {
3877 return NT_STATUS_NO_MEMORY;
/* channel setup complete: drop the temporary signing key */
3881 if (session->smb2.channel_setup) {
3882 data_blob_free(&session->smb2.signing_key);
3883 session->smb2.channel_setup = false;
/* keep an existing signing key; only set it once */
3886 if (session->smb2.signing_key.length > 0) {
3887 return NT_STATUS_OK;
3890 session->smb2.signing_key = data_blob_dup_talloc(session, signing_key);
3891 if (session->smb2.signing_key.data == NULL) {
3892 return NT_STATUS_NO_MEMORY;
/* sign if the client wants to or the server requires it */
3895 session->smb2.should_sign = false;
3897 if (conn->desire_signing) {
3898 session->smb2.should_sign = true;
3901 if (conn->smb2.server.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
3902 session->smb2.should_sign = true;
3905 return NT_STATUS_OK;
/*
 * Create a second session object bound to another connection that
 * shares session1's session id, flags and keys — i.e. channel binding
 * for an additional transport. session1 must be fully established
 * (non-guest/non-anonymous, with session and signing keys present).
 * The new session is marked channel_setup = true so the next
 * smb2cli_session_update_session_key() completes the channel.
 *
 * @param _session2  receives the new session on success
 * @return NT_STATUS_OK, NT_STATUS_INVALID_PARAMETER_MIX on a violated
 *         precondition, or NT_STATUS_NO_MEMORY.
 */
3908 NTSTATUS smb2cli_session_create_channel(TALLOC_CTX *mem_ctx,
3909 struct smbXcli_session *session1,
3910 struct smbXcli_conn *conn,
3911 struct smbXcli_session **_session2)
3913 struct smbXcli_session *session2;
3914 uint16_t no_sign_flags;
/* a guest/anonymous session has no keys to bind a channel with */
3916 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
3918 if (session1->smb2.session_flags & no_sign_flags) {
3919 return NT_STATUS_INVALID_PARAMETER_MIX;
3922 if (session1->smb2.session_key.length == 0) {
3923 return NT_STATUS_INVALID_PARAMETER_MIX;
3926 if (session1->smb2.signing_key.length == 0) {
3927 return NT_STATUS_INVALID_PARAMETER_MIX;
3931 return NT_STATUS_INVALID_PARAMETER_MIX;
3934 session2 = talloc_zero(mem_ctx, struct smbXcli_session);
3935 if (session2 == NULL) {
3936 return NT_STATUS_NO_MEMORY;
/* the channel shares id, flags and key material with session1 */
3938 session2->smb2.session_id = session1->smb2.session_id;
3939 session2->smb2.session_flags = session1->smb2.session_flags;
3941 session2->smb2.session_key = data_blob_dup_talloc(session2,
3942 session1->smb2.session_key);
3943 if (session2->smb2.session_key.data == NULL) {
3944 return NT_STATUS_NO_MEMORY;
3947 session2->smb2.signing_key = data_blob_dup_talloc(session2,
3948 session1->smb2.signing_key);
3949 if (session2->smb2.signing_key.data == NULL) {
3950 return NT_STATUS_NO_MEMORY;
3953 session2->smb2.should_sign = session1->smb2.should_sign;
3954 session2->smb2.channel_setup = true;
/* attach the new session to the target connection */
3956 talloc_set_destructor(session2, smbXcli_session_destructor);
3957 DLIST_ADD_END(conn->sessions, session2, struct smbXcli_session *);
3958 session2->conn = conn;
3960 *_session2 = session2;
3961 return NT_STATUS_OK;