2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "lib/util/dlinklist.h"
28 #include "../libcli/smb/smb_common.h"
29 #include "../libcli/smb/smb_seal.h"
30 #include "../libcli/smb/smb_signing.h"
31 #include "../libcli/smb/read_smb.h"
32 #include "smbXcli_base.h"
33 #include "librpc/ndr/libndr.h"
37 struct smbXcli_session;
/* NOTE(review): the following are fields of struct smbXcli_conn; the
 * struct header and several members are not visible in this view. */
/* Cached socket endpoints, filled via getsockname()/getpeername(). */
41 struct sockaddr_storage local_ss;
42 struct sockaddr_storage remote_ss;
/* talloc'ed copy of the server name passed to smbXcli_conn_create(). */
43 const char *remote_name;
/* tevent queue serializing writes of outgoing PDUs to the socket. */
45 struct tevent_queue *outgoing;
/* talloc array of requests currently waiting for a server reply. */
46 struct tevent_req **pending;
/* The single outstanding read_smb request; a child of conn->pending. */
47 struct tevent_req *read_smb_req;
/* Negotiated protocol level; PROTOCOL_NONE until negotiation is done. */
49 enum protocol_types protocol;
52 bool mandatory_signing;
55 * The incoming dispatch function should return:
56 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
57 * - NT_STATUS_OK, if no more processing is desired, e.g.
58 * the dispatch function called
60 * - All other return values disconnect the connection.
62 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
/* SMB1 client-advertised capabilities (see smbXcli_conn_create). */
68 uint32_t capabilities;
/* Server-reported SMB1 values as learned during negotiation —
 * presumably inside nested client/server sub-structs; the struct
 * braces are not visible here, so confirm against the full source. */
73 uint32_t capabilities;
76 uint16_t security_mode;
85 const char *workgroup;
91 uint32_t capabilities;
/* SMB1 signing and transport-encryption state (smb_seal/smb_signing). */
96 struct smb_signing_state *signing;
97 struct smb_trans_enc_state *trans_enc;
/* SMB2 negotiation values (client- and server-side). */
102 uint16_t security_mode;
107 uint32_t capabilities;
108 uint16_t security_mode;
110 uint32_t max_trans_size;
111 uint32_t max_read_size;
112 uint32_t max_write_size;
/* SMB2 crediting: credits currently usable / maximum to request. */
119 uint16_t cur_credits;
120 uint16_t max_credits;
/* Doubly-linked list of sessions on this connection (DLIST_*). */
123 struct smbXcli_session *sessions;
/*
 * One authenticated session on a connection. Sessions are kept in a
 * doubly-linked list hanging off conn->sessions (see the destructor,
 * which detaches them via DLIST_REMOVE).
 */
126 struct smbXcli_session {
127 struct smbXcli_session *prev, *next;
/* Owning connection; set to NULL when the connection is destroyed. */
128 struct smbXcli_conn *conn;
132 uint16_t session_flags;
/* Keys used for PDU signing and as the base session key. */
133 DATA_BLOB signing_key;
134 DATA_BLOB session_key;
/*
 * Per-request state shared by the SMB1 and SMB2 request engines.
 * Attached to a tevent_req via tevent_req_data().
 */
139 struct smbXcli_req_state {
140 struct tevent_context *ev;
141 struct smbXcli_conn *conn;
142 struct smbXcli_session *session; /* maybe NULL */
/* 4-byte NBT length header sent in front of the SMB PDU. */
144 uint8_t length_hdr[4];
151 /* Space for the header including the wct */
152 uint8_t hdr[HDR_VWV];
155 * For normal requests, smb1cli_req_send chooses a mid.
156 * SecondaryV trans requests need to use the mid of the primary
157 * request, so we need a place to store it.
158 * Assume it is set if != 0.
/* Wire-format byte count (BCC), little-endian, filled via SSVAL. */
163 uint8_t bytecount_buf[2];
165 #define MAX_SMB_IOV 5
166 /* length_hdr, hdr, words, byte_count, buffers */
167 struct iovec iov[1 + 3 + MAX_SMB_IOV];
/* talloc array of requests chained onto this one via AndX. */
171 struct tevent_req **chained_requests;
174 NTSTATUS recv_status;
175 /* always an array of 3 talloc elements */
176 struct iovec *recv_iov;
/* SMB2 request body pieces (fixed part + optional dynamic part). */
180 const uint8_t *fixed;
186 uint8_t pad[7]; /* padding space for compounding */
188 /* always an array of 3 talloc elements */
189 struct iovec *recv_iov;
191 uint16_t credit_charge;
193 bool signing_skipped;
/*
 * talloc destructor for a connection: tears the connection down
 * silently, detaches all sessions, and frees SMB1 encryption state.
 */
197 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
200 * NT_STATUS_OK, means we do not notify the callers
202 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
/* Detach every session; they may outlive the connection object. */
204 while (conn->sessions) {
205 conn->sessions->conn = NULL;
206 DLIST_REMOVE(conn->sessions, conn->sessions);
/* Free the SMB1 transport-encryption context, if one was set. */
209 if (conn->smb1.trans_enc) {
210 common_free_encryption_state(&conn->smb1.trans_enc);
/*
 * Create a connection object around an already-connected socket fd.
 *
 * @param mem_ctx            talloc parent for the new connection
 * @param remote_name        server name, duplicated onto the connection
 * @param signing_state      client signing policy (off/if-required/required)
 * @param smb1_capabilities  SMB1 capability bits the client advertises
 * @param client_guid        SMB2 client GUID (dereferenced, so must be valid)
 * @return new connection, or NULL on allocation failure (error paths are
 *         not fully visible in this view — TODO confirm cleanup labels)
 */
216 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
218 const char *remote_name,
219 enum smb_signing_setting signing_state,
220 uint32_t smb1_capabilities,
221 struct GUID *client_guid)
223 struct smbXcli_conn *conn = NULL;
225 struct sockaddr *sa = NULL;
229 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
234 conn->remote_name = talloc_strdup(conn, remote_name);
235 if (conn->remote_name == NULL) {
/* Record both socket endpoints for later accessor functions. */
241 ss = (void *)&conn->local_ss;
242 sa = (struct sockaddr *)ss;
243 sa_length = sizeof(conn->local_ss);
244 ret = getsockname(fd, sa, &sa_length);
248 ss = (void *)&conn->remote_ss;
249 sa = (struct sockaddr *)ss;
250 sa_length = sizeof(conn->remote_ss);
251 ret = getpeername(fd, sa, &sa_length);
256 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
257 if (conn->outgoing == NULL) {
260 conn->pending = NULL;
262 conn->protocol = PROTOCOL_NONE;
/* Map the caller's signing policy onto the three internal flags. */
264 switch (signing_state) {
265 case SMB_SIGNING_OFF:
267 conn->allow_signing = false;
268 conn->desire_signing = false;
269 conn->mandatory_signing = false;
271 case SMB_SIGNING_DEFAULT:
272 case SMB_SIGNING_IF_REQUIRED:
273 /* if the server requires it */
274 conn->allow_signing = true;
275 conn->desire_signing = false;
276 conn->mandatory_signing = false;
278 case SMB_SIGNING_REQUIRED:
280 conn->allow_signing = true;
281 conn->desire_signing = true;
282 conn->mandatory_signing = true;
/* SMB1 defaults; max_xmit stays small until negotiated upward. */
286 conn->smb1.client.capabilities = smb1_capabilities;
287 conn->smb1.client.max_xmit = UINT16_MAX;
289 conn->smb1.capabilities = conn->smb1.client.capabilities;
290 conn->smb1.max_xmit = 1024;
294 /* initialise signing */
295 conn->smb1.signing = smb_signing_init(conn,
297 conn->desire_signing,
298 conn->mandatory_signing);
299 if (!conn->smb1.signing) {
/* SMB2: always advertise signing; require it if policy demands. */
303 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
304 if (conn->mandatory_signing) {
305 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
308 conn->smb2.client.guid = *client_guid;
/* Start with a single SMB2 credit until the server grants more. */
311 conn->smb2.cur_credits = 1;
312 conn->smb2.max_credits = 0;
314 talloc_set_destructor(conn, smbXcli_conn_destructor);
/* True while the underlying socket fd is still open (fd != -1). */
322 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
328 if (conn->fd == -1) {
/* Return the protocol level negotiated on this connection. */
335 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
337 return conn->protocol;
/* Unicode is implied for SMB2+, otherwise governed by CAP_UNICODE. */
340 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
342 if (conn->protocol >= PROTOCOL_SMB2_02) {
346 if (conn->smb1.capabilities & CAP_UNICODE) {
/* Apply "socket options"-style settings to the raw fd. */
353 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
355 set_socket_options(conn->fd, options);
/* Accessors for the endpoints captured in smbXcli_conn_create(). */
358 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
360 return &conn->local_ss;
363 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
365 return &conn->remote_ss;
368 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
370 return conn->remote_name;
/* Max parallel requests: SMB1 uses the server's max_mux; the SMB2
 * branch's return value is not visible here — presumably credits. */
373 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
375 if (conn->protocol >= PROTOCOL_SMB2_02) {
382 return conn->smb1.server.max_mux;
/* Server system time as reported during negotiation. */
385 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
387 if (conn->protocol >= PROTOCOL_SMB2_02) {
388 return conn->smb2.server.system_time;
391 return conn->smb1.server.system_time;
/* Initial GSS/SPNEGO blob offered by the server, per dialect. */
394 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
396 if (conn->protocol >= PROTOCOL_SMB2_02) {
397 return &conn->smb2.server.gss_blob;
400 return &conn->smb1.server.gss_blob;
/* Server GUID, per dialect. */
403 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
405 if (conn->protocol >= PROTOCOL_SMB2_02) {
406 return &conn->smb2.server.guid;
409 return &conn->smb1.server.guid;
/* SMB1-only accessors for negotiated connection parameters. */
412 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
414 return conn->smb1.capabilities;
417 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
419 return conn->smb1.max_xmit;
422 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
424 return conn->smb1.server.session_key;
/* Challenge bytes from the SMB1 negprot reply (NTLM challenge). */
427 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
429 return conn->smb1.server.challenge;
432 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
434 return conn->smb1.server.security_mode;
437 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
439 return conn->smb1.server.time_zone;
/*
 * Turn on SMB1 signing with the session key (and NTLM response used
 * as part of the initial MAC key, per the smb_signing API).
 */
442 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
443 const DATA_BLOB user_session_key,
444 const DATA_BLOB response)
446 return smb_signing_activate(conn->smb1.signing,
/* Verify the signature of a received PDU against a sequence number. */
451 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
452 const uint8_t *buf, uint32_t seqnum)
454 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
457 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
459 return smb_signing_is_active(conn->smb1.signing);
/*
 * Install an SMB1 transport-encryption context; takes ownership in the
 * sense that any previous context is freed first.
 */
462 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
463 struct smb_trans_enc_state *es)
465 /* Replace the old state, if any. */
466 if (conn->smb1.trans_enc) {
467 common_free_encryption_state(&conn->smb1.trans_enc);
469 conn->smb1.trans_enc = es;
472 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
474 return common_encryption_on(conn->smb1.trans_enc);
/*
 * Extract the error from an SMB1 header: a 32-bit NTSTATUS when
 * FLAGS2_32_BIT_ERROR_CODES is set, otherwise a DOS class/code pair
 * wrapped via NT_STATUS_DOS().
 */
478 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
480 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
481 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
483 if (NT_STATUS_IS_OK(status)) {
487 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
491 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
495 * Is the SMB command able to hold an AND_X successor
496 * @param[in] cmd The SMB command in question
497 * @retval Can we add a chained request after "cmd"?
499 bool smb1cli_is_andx_req(uint8_t cmd)
/*
 * Pick a multiplex id not currently used by any pending request.
 * 0 and 0xffff are skipped (0 means "unset" elsewhere in this file and
 * 0xffff is reserved for oplock breaks — see the dispatch function).
 */
519 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
521 size_t num_pending = talloc_array_length(conn->pending);
527 result = conn->smb1.mid++;
528 if ((result == 0) || (result == 0xffff)) {
/* Retry until the candidate collides with no pending request. */
532 for (i=0; i<num_pending; i++) {
533 if (result == smb1cli_req_mid(conn->pending[i])) {
538 if (i == num_pending) {
/*
 * Remove a request from conn->pending, shrinking the talloc array.
 * When the last request goes away, the socket-read request (a talloc
 * child of conn->pending) is freed along with the array.
 */
544 void smbXcli_req_unset_pending(struct tevent_req *req)
546 struct smbXcli_req_state *state =
548 struct smbXcli_req_state);
549 struct smbXcli_conn *conn = state->conn;
550 size_t num_pending = talloc_array_length(conn->pending);
553 if (state->smb1.mid != 0) {
555 * This is a [nt]trans[2] request which waits
556 * for more than one reply.
/* Detach the destructor so freeing req later won't re-enter here. */
561 talloc_set_destructor(req, NULL);
563 if (num_pending == 1) {
565 * The pending read_smb tevent_req is a child of
566 * conn->pending. So if nothing is pending anymore, we need to
567 * delete the socket read fde.
569 TALLOC_FREE(conn->pending);
570 conn->read_smb_req = NULL;
/* Locate req in the pending array. */
574 for (i=0; i<num_pending; i++) {
575 if (req == conn->pending[i]) {
579 if (i == num_pending) {
581 * Something's seriously broken. Just returning here is the
582 * right thing nevertheless, the point of this routine is to
583 * remove ourselves from conn->pending.
589 * Remove ourselves from the conn->pending array
591 for (; i < (num_pending - 1); i++) {
592 conn->pending[i] = conn->pending[i+1];
596 * No NULL check here, we're shrinking by sizeof(void *), and
597 * talloc_realloc just adjusts the size for this.
599 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
/*
 * talloc destructor for a request: guarantees the request is removed
 * from its connection's pending array even if the caller frees it
 * without receiving a reply.
 */
604 static int smbXcli_req_destructor(struct tevent_req *req)
606 struct smbXcli_req_state *state =
608 struct smbXcli_req_state);
611 * Make sure we really remove it from
612 * the pending array on destruction.
615 smbXcli_req_unset_pending(req);
619 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
/*
 * Append a request to conn->pending and make sure a socket read is
 * outstanding. On failure the request is unlinked again and the whole
 * connection is torn down with NT_STATUS_NO_MEMORY.
 *
 * @return true on success, false if the caller must fail the request
 */
621 bool smbXcli_req_set_pending(struct tevent_req *req)
623 struct smbXcli_req_state *state =
625 struct smbXcli_req_state);
626 struct smbXcli_conn *conn;
627 struct tevent_req **pending;
632 if (!smbXcli_conn_is_connected(conn)) {
/* Grow the pending array by one slot and store the request. */
636 num_pending = talloc_array_length(conn->pending);
638 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
640 if (pending == NULL) {
643 pending[num_pending] = req;
644 conn->pending = pending;
/* From now on, freeing req must unlink it (see destructor). */
645 talloc_set_destructor(req, smbXcli_req_destructor);
647 if (!smbXcli_conn_receive_next(conn)) {
649 * the caller should notify the current request
651 * And all other pending requests get notified
652 * by smbXcli_conn_disconnect().
654 smbXcli_req_unset_pending(req);
655 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
662 static void smbXcli_conn_received(struct tevent_req *subreq);
/*
 * Ensure exactly one read_smb request is outstanding on the socket
 * whenever there are pending requests. Also disconnects the
 * connection once the SMB2 message-id space is exhausted.
 *
 * @return true on success, false on allocation failure
 */
664 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
666 size_t num_pending = talloc_array_length(conn->pending);
667 struct tevent_req *req;
668 struct smbXcli_req_state *state;
/* A read is already outstanding — nothing to do. */
670 if (conn->read_smb_req != NULL) {
674 if (num_pending == 0) {
675 if (conn->smb2.mid < UINT64_MAX) {
676 /* no more pending requests, so we are done for now */
681 * If there are no more SMB2 requests possible,
682 * because we are out of message ids,
683 * we need to disconnect.
685 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
689 req = conn->pending[0];
690 state = tevent_req_data(req, struct smbXcli_req_state);
693 * We're the first ones, add the read_smb request that waits for the
694 * answer from the server
696 conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
697 if (conn->read_smb_req == NULL) {
700 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
/*
 * Tear down the connection: close the socket and fail every pending
 * request (including AndX-chained ones) with the given status.
 * NT_STATUS_OK means "silent" — requests are unlinked but callers are
 * not notified (used by the destructor).
 */
704 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
706 if (conn->fd != -1) {
712 * Cancel all pending requests. We do not do a for-loop walking
713 * conn->pending because that array changes in
714 * smbXcli_req_unset_pending.
716 while (talloc_array_length(conn->pending) > 0) {
717 struct tevent_req *req;
718 struct smbXcli_req_state *state;
719 struct tevent_req **chain;
723 req = conn->pending[0];
724 state = tevent_req_data(req, struct smbXcli_req_state);
726 if (state->smb1.chained_requests == NULL) {
728 * We're dead. No point waiting for trans2
733 smbXcli_req_unset_pending(req);
735 if (NT_STATUS_IS_OK(status)) {
736 /* do not notify the callers */
741 * we need to defer the callback, because we may notify
742 * more then one caller.
744 tevent_req_defer_callback(req, state->ev);
745 tevent_req_nterror(req, status);
/* The request heads an AndX chain: fail every chained request. */
749 chain = talloc_move(conn, &state->smb1.chained_requests);
750 num_chained = talloc_array_length(chain);
752 for (i=0; i<num_chained; i++) {
754 state = tevent_req_data(req, struct smbXcli_req_state);
757 * We're dead. No point waiting for trans2
762 smbXcli_req_unset_pending(req);
764 if (NT_STATUS_IS_OK(status)) {
765 /* do not notify the callers */
770 * we need to defer the callback, because we may notify
771 * more then one caller.
773 tevent_req_defer_callback(req, state->ev);
774 tevent_req_nterror(req, status);
781 * Fetch a smb request's mid. Only valid after the request has been sent by
782 * smb1cli_req_send().
784 uint16_t smb1cli_req_mid(struct tevent_req *req)
786 struct smbXcli_req_state *state =
788 struct smbXcli_req_state);
/* An explicitly-set mid (trans secondaries) takes precedence; 0 means
 * "unset", so fall back to the mid stored in the wire header. */
790 if (state->smb1.mid != 0) {
791 return state->smb1.mid;
794 return SVAL(state->smb1.hdr, HDR_MID);
/* Force a specific mid (used by secondary trans requests). */
797 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
799 struct smbXcli_req_state *state =
801 struct smbXcli_req_state);
803 state->smb1.mid = mid;
/* Get/set the signing sequence number assigned when the PDU was sent. */
806 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
808 struct smbXcli_req_state *state =
810 struct smbXcli_req_state);
812 return state->smb1.seqnum;
815 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
817 struct smbXcli_req_state *state =
819 struct smbXcli_req_state);
821 state->smb1.seqnum = seqnum;
/* Sum the lengths of an iovec array. */
824 static size_t smbXcli_iov_len(const struct iovec *iov, int count)
828 for (i=0; i<count; i++) {
829 result += iov[i].iov_len;
/*
 * Flatten an iovec array into one contiguous talloc'ed buffer.
 * Returns NULL on allocation failure; caller owns the result.
 */
834 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
835 const struct iovec *iov,
838 size_t len = smbXcli_iov_len(iov, count);
843 buf = talloc_array(mem_ctx, uint8_t, len);
848 for (i=0; i<count; i++) {
849 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
850 copied += iov[i].iov_len;
/*
 * Compute the SMB1 header FLG/FLG2 values for an outgoing request
 * from the negotiated protocol level and capabilities, then apply the
 * caller's additional/clear masks on top.
 */
855 static void smb1cli_req_flags(enum protocol_types protocol,
856 uint32_t smb1_capabilities,
858 uint8_t additional_flags,
861 uint16_t additional_flags2,
862 uint16_t clear_flags2,
/* Baseline flags grow with the protocol level. */
868 if (protocol >= PROTOCOL_LANMAN1) {
869 flags |= FLAG_CASELESS_PATHNAMES;
870 flags |= FLAG_CANONICAL_PATHNAMES;
873 if (protocol >= PROTOCOL_LANMAN2) {
874 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
875 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
878 if (protocol >= PROTOCOL_NT1) {
879 flags2 |= FLAGS2_IS_LONG_NAME;
/* Capability-dependent flags2 bits (NT1 and up). */
881 if (smb1_capabilities & CAP_UNICODE) {
882 flags2 |= FLAGS2_UNICODE_STRINGS;
884 if (smb1_capabilities & CAP_STATUS32) {
885 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
887 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
888 flags2 |= FLAGS2_EXTENDED_SECURITY;
/* Caller overrides: add first, then clear. */
892 flags |= additional_flags;
893 flags &= ~clear_flags;
894 flags2 |= additional_flags2;
895 flags2 &= ~clear_flags2;
/*
 * Build (but do not send) an SMB1 request: fills in the wire header,
 * word vector and byte iovecs in state->smb1.iov[]. The mid is left 0
 * here and assigned at submit time (see smb1cli_req_writev_submit).
 *
 * @param bytes_iov  up to MAX_SMB_IOV data buffers; borrowed, must stay
 *                   alive until the request is sent
 * @return the new tevent_req, or NULL on allocation failure
 */
901 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
902 struct tevent_context *ev,
903 struct smbXcli_conn *conn,
905 uint8_t additional_flags,
907 uint16_t additional_flags2,
908 uint16_t clear_flags2,
909 uint32_t timeout_msec,
913 uint8_t wct, uint16_t *vwv,
915 struct iovec *bytes_iov)
917 struct tevent_req *req;
918 struct smbXcli_req_state *state;
922 if (iov_count > MAX_SMB_IOV) {
924 * Should not happen :-)
929 req = tevent_req_create(mem_ctx, &state,
930 struct smbXcli_req_state);
/* 0xFF is "no command" — marks the reply as not yet received. */
937 state->smb1.recv_cmd = 0xFF;
938 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
939 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
940 if (state->smb1.recv_iov == NULL) {
945 smb1cli_req_flags(conn->protocol,
946 conn->smb1.capabilities,
/* Fill the fixed SMB1 header fields. */
955 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
956 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
957 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
958 SCVAL(state->smb1.hdr, HDR_FLG, flags);
959 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
960 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
961 SSVAL(state->smb1.hdr, HDR_TID, tid);
962 SSVAL(state->smb1.hdr, HDR_PID, pid);
963 SSVAL(state->smb1.hdr, HDR_UID, uid);
964 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
965 SSVAL(state->smb1.hdr, HDR_WCT, wct);
967 state->smb1.vwv = vwv;
/* BCC = total length of all byte buffers. */
969 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
/* iov layout: [0] NBT length, [1] header, [2] words, [3] BCC,
 * [4..] caller byte buffers (see the comment at MAX_SMB_IOV). */
971 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
972 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
973 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
974 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
975 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
976 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
977 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
978 state->smb1.iov[3].iov_len = sizeof(uint16_t);
980 if (iov_count != 0) {
981 memcpy(&state->smb1.iov[4], bytes_iov,
982 iov_count * sizeof(*bytes_iov));
984 state->smb1.iov_count = iov_count + 4;
986 if (timeout_msec > 0) {
987 struct timeval endtime;
989 endtime = timeval_current_ofs_msec(timeout_msec);
990 if (!tevent_req_set_endtime(req, ev, endtime)) {
/* Some commands never get a reply; mark them one-way so the send
 * completion finishes the request immediately. */
995 switch (smb_command) {
1000 state->one_way = true;
1004 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
1005 state->one_way = true;
/*
 * Sign an outgoing PDU: flatten the iovec into one buffer, sign it,
 * then copy the signed header back into iov[1]. The sequence number
 * used is returned through *seqnum for later reply verification.
 */
1013 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
1014 struct iovec *iov, int iov_count,
1020 * Obvious optimization: Make cli_calculate_sign_mac work with struct
1021 * iovec directly. MD5Update would do that just fine.
/* Validate the canonical iov layout produced by smb1cli_req_create. */
1024 if (iov_count < 4) {
1025 return NT_STATUS_INVALID_PARAMETER_MIX;
1027 if (iov[0].iov_len != NBT_HDR_SIZE) {
1028 return NT_STATUS_INVALID_PARAMETER_MIX;
1030 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1031 return NT_STATUS_INVALID_PARAMETER_MIX;
1033 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1034 return NT_STATUS_INVALID_PARAMETER_MIX;
1036 if (iov[3].iov_len != sizeof(uint16_t)) {
1037 return NT_STATUS_INVALID_PARAMETER_MIX;
1040 buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1042 return NT_STATUS_NO_MEMORY;
1045 *seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
1046 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
/* Copy the now-signed SMB header (past the 4-byte NBT header) back. */
1047 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
1050 return NT_STATUS_OK;
1053 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1054 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1055 TALLOC_CTX *tmp_mem,
/*
 * Final submit path for an SMB1 request: assign a mid, set the NBT
 * length, sign, optionally encrypt, and queue the writev on the
 * connection's outgoing queue. SMB1 only (rejects protocol > NT1).
 */
1058 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1059 struct smbXcli_req_state *state,
1060 struct iovec *iov, int iov_count)
1062 struct tevent_req *subreq;
1066 if (!smbXcli_conn_is_connected(state->conn)) {
1067 return NT_STATUS_CONNECTION_DISCONNECTED;
1070 if (state->conn->protocol > PROTOCOL_NT1) {
1071 return NT_STATUS_REVISION_MISMATCH;
/* Same layout sanity checks as smb1cli_conn_signv(). */
1074 if (iov_count < 4) {
1075 return NT_STATUS_INVALID_PARAMETER_MIX;
1077 if (iov[0].iov_len != NBT_HDR_SIZE) {
1078 return NT_STATUS_INVALID_PARAMETER_MIX;
1080 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1081 return NT_STATUS_INVALID_PARAMETER_MIX;
1083 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1084 return NT_STATUS_INVALID_PARAMETER_MIX;
1086 if (iov[3].iov_len != sizeof(uint16_t)) {
1087 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Use the forced mid if set, otherwise allocate a fresh one. */
1090 if (state->smb1.mid != 0) {
1091 mid = state->smb1.mid;
1093 mid = smb1cli_alloc_mid(state->conn);
1095 SSVAL(iov[1].iov_base, HDR_MID, mid);
/* NBT length = everything after the 4-byte NBT header. */
1097 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
1099 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1100 &state->smb1.seqnum);
1102 if (!NT_STATUS_IS_OK(status)) {
1107 * If we supported multiple encrytion contexts
1108 * here we'd look up based on tid.
1110 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1111 char *buf, *enc_buf;
1113 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1115 return NT_STATUS_NO_MEMORY;
1117 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1118 (char *)buf, &enc_buf);
1120 if (!NT_STATUS_IS_OK(status)) {
1121 DEBUG(0, ("Error in encrypting client message: %s\n",
1122 nt_errstr(status)));
/* Replace the iovec with the single encrypted buffer, owned by
 * the request state. */
1125 buf = (char *)talloc_memdup(state, enc_buf,
1126 smb_len_nbt(enc_buf)+4);
1129 return NT_STATUS_NO_MEMORY;
1131 iov[0].iov_base = (void *)buf;
1132 iov[0].iov_len = talloc_get_size(buf);
/* First SMB1 PDU on this connection installs the SMB1 dispatcher. */
1136 if (state->conn->dispatch_incoming == NULL) {
1137 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1140 subreq = writev_send(state, state->ev, state->conn->outgoing,
1141 state->conn->fd, false, iov, iov_count);
1142 if (subreq == NULL) {
1143 return NT_STATUS_NO_MEMORY;
1145 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1146 return NT_STATUS_OK;
/*
 * Convenience wrapper: build a single (unchained) SMB1 request with one
 * flat bytes buffer and submit it via smb1cli_req_chain_submit().
 * Returns a posted (already-failed) request on error.
 */
1149 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1150 struct tevent_context *ev,
1151 struct smbXcli_conn *conn,
1152 uint8_t smb_command,
1153 uint8_t additional_flags,
1154 uint8_t clear_flags,
1155 uint16_t additional_flags2,
1156 uint16_t clear_flags2,
1157 uint32_t timeout_msec,
1161 uint8_t wct, uint16_t *vwv,
1163 const uint8_t *bytes)
1165 struct tevent_req *req;
/* Wrap the caller's flat byte buffer in a one-element iovec. */
1169 iov.iov_base = discard_const_p(void, bytes);
1170 iov.iov_len = num_bytes;
1172 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1173 additional_flags, clear_flags,
1174 additional_flags2, clear_flags2,
/* req_create may have already failed the request (e.g. timeout setup). */
1181 if (!tevent_req_is_in_progress(req)) {
1182 return tevent_req_post(req, ev);
1184 status = smb1cli_req_chain_submit(&req, 1);
1185 if (tevent_req_nterror(req, status)) {
1186 return tevent_req_post(req, ev);
/*
 * writev completion: on write failure tear down the connection; for
 * one-way commands finish immediately, otherwise register the request
 * as pending so the reply dispatcher can find it.
 */
1191 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1193 struct tevent_req *req =
1194 tevent_req_callback_data(subreq,
1196 struct smbXcli_req_state *state =
1197 tevent_req_data(req,
1198 struct smbXcli_req_state);
1202 nwritten = writev_recv(subreq, &err);
1203 TALLOC_FREE(subreq);
1204 if (nwritten == -1) {
/* A failed write poisons the whole connection, not just this req. */
1205 NTSTATUS status = map_nt_error_from_unix_common(err);
1206 smbXcli_conn_disconnect(state->conn, status);
1210 if (state->one_way) {
1211 state->inbuf = NULL;
1212 tevent_req_done(req);
1216 if (!smbXcli_req_set_pending(req)) {
1217 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
/*
 * read_smb completion: hand the received PDU to the per-dialect
 * dispatcher and either stop (NT_STATUS_OK), re-arm the read
 * (NT_STATUS_RETRY) or disconnect (anything else). See the comment on
 * dispatch_incoming in struct smbXcli_conn for the contract.
 */
1222 static void smbXcli_conn_received(struct tevent_req *subreq)
1224 struct smbXcli_conn *conn =
1225 tevent_req_callback_data(subreq,
1226 struct smbXcli_conn);
1227 TALLOC_CTX *frame = talloc_stackframe();
1233 if (subreq != conn->read_smb_req) {
1234 DEBUG(1, ("Internal error: cli_smb_received called with "
1235 "unexpected subreq\n"));
1236 status = NT_STATUS_INTERNAL_ERROR;
1237 smbXcli_conn_disconnect(conn, status);
1241 conn->read_smb_req = NULL;
1243 received = read_smb_recv(subreq, frame, &inbuf, &err);
1244 TALLOC_FREE(subreq);
1245 if (received == -1) {
1246 status = map_nt_error_from_unix_common(err);
1247 smbXcli_conn_disconnect(conn, status);
1252 status = conn->dispatch_incoming(conn, frame, inbuf);
1254 if (NT_STATUS_IS_OK(status)) {
1256 * We should not do any more processing
1257 * as the dispatch function called
1258 * tevent_req_done().
1261 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1263 * We got an error, so notify all pending requests
1265 smbXcli_conn_disconnect(conn, status);
1270 * We got NT_STATUS_RETRY, so we may ask for a
1271 * next incoming pdu.
1273 if (!smbXcli_conn_receive_next(conn)) {
1274 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
/*
 * Parse a received SMB1 PDU (possibly an AndX chain) into an iovec
 * array: iov[0] is the header up to WCT, then one (words, bytes) iovec
 * pair per command in the chain. Any trailing bytes beyond the parsed
 * structure are appended to the last DATA iovec (large read/write).
 *
 * @param buf       full NBT-framed PDU
 * @param piov      out: talloc'ed iovec array on mem_ctx
 * @param pnum_iov  out: number of valid iovec entries
 * @return NT_STATUS_INVALID_NETWORK_RESPONSE on any malformed layout
 */
1278 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1279 struct iovec **piov, int *pnum_iov)
1290 buflen = smb_len_nbt(buf);
1293 hdr = buf + NBT_HDR_SIZE;
1295 if (buflen < MIN_SMB_SIZE) {
1296 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1300 * This returns iovec elements in the following order:
1315 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1317 return NT_STATUS_NO_MEMORY;
1319 iov[0].iov_base = hdr;
1320 iov[0].iov_len = HDR_WCT;
1323 cmd = CVAL(hdr, HDR_COM);
/* Walk the AndX chain command by command. */
1327 size_t len = buflen - taken;
1329 struct iovec *iov_tmp;
1336 * we need at least WCT and BCC
1338 needed = sizeof(uint8_t) + sizeof(uint16_t);
1340 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1341 __location__, (int)len, (int)needed));
1346 * Now we check if the specified words are there
1348 wct = CVAL(hdr, wct_ofs);
1349 needed += wct * sizeof(uint16_t);
1351 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1352 __location__, (int)len, (int)needed));
1357 * Now we check if the specified bytes are there
1359 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1360 bcc = SVAL(hdr, bcc_ofs);
1361 needed += bcc * sizeof(uint8_t);
1363 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1364 __location__, (int)len, (int)needed));
1369 * we allocate 2 iovec structures for words and bytes
1371 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1373 if (iov_tmp == NULL) {
1375 return NT_STATUS_NO_MEMORY;
1378 cur = &iov[num_iov];
/* cur[0] = VWV words, cur[1] = data bytes for this command. */
1381 cur[0].iov_len = wct * sizeof(uint16_t);
1382 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1383 cur[1].iov_len = bcc * sizeof(uint8_t);
1384 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1388 if (!smb1cli_is_andx_req(cmd)) {
1390 * If the current command does not have AndX chanining
1396 if (wct == 0 && bcc == 0) {
1398 * An empty response also ends the chain,
1399 * most likely with an error.
/* AndX continuation needs at least 2 words (next cmd + offset). */
1405 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1406 __location__, (int)wct, (int)cmd));
1409 cmd = CVAL(cur[0].iov_base, 0);
1412 * If it is the end of the chain we are also done.
1416 wct_ofs = SVAL(cur[0].iov_base, 2);
/* The next wct offset must lie within the PDU, after what we took. */
1418 if (wct_ofs < taken) {
1419 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1421 if (wct_ofs > buflen) {
1422 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1426 * we consumed everything up to the start of the next
1432 remaining = buflen - taken;
1434 if (remaining > 0 && num_iov >= 3) {
1436 * The last DATA block gets the remaining
1437 * bytes, this is needed to support
1438 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1440 iov[num_iov-1].iov_len += remaining;
1444 *pnum_iov = num_iov;
1445 return NT_STATUS_OK;
1449 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * SMB1 implementation of conn->dispatch_incoming: decrypt if needed,
 * match the PDU to a pending request by mid (or recognize an oplock
 * break on mid 0xffff), verify the signature, parse the AndX chain and
 * complete the matching request(s).
 *
 * Return contract (see struct smbXcli_conn): NT_STATUS_OK if this was
 * the last pending request, NT_STATUS_RETRY to keep reading, anything
 * else disconnects the connection.
 */
1452 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1453 TALLOC_CTX *tmp_mem,
1456 struct tevent_req *req;
1457 struct smbXcli_req_state *state;
1464 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1465 struct iovec *iov = NULL;
1467 struct tevent_req **chain = NULL;
1468 size_t num_chained = 0;
1469 size_t num_responses = 0;
1471 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1472 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1473 DEBUG(10, ("Got non-SMB PDU\n"));
1474 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1478 * If we supported multiple encrytion contexts
1479 * here we'd look up based on tid.
1481 if (common_encryption_on(conn->smb1.trans_enc)
1482 && (CVAL(inbuf, 0) == 0)) {
1483 uint16_t enc_ctx_num;
1485 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1486 if (!NT_STATUS_IS_OK(status)) {
1487 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1488 nt_errstr(status)));
1492 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1493 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1495 conn->smb1.trans_enc->enc_ctx_num));
1496 return NT_STATUS_INVALID_HANDLE;
1499 status = common_decrypt_buffer(conn->smb1.trans_enc,
1501 if (!NT_STATUS_IS_OK(status)) {
1502 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1503 nt_errstr(status)));
/* Find the pending request this reply belongs to. */
1508 mid = SVAL(inhdr, HDR_MID);
1509 num_pending = talloc_array_length(conn->pending);
1511 for (i=0; i<num_pending; i++) {
1512 if (mid == smb1cli_req_mid(conn->pending[i])) {
1516 if (i == num_pending) {
1517 /* Dump unexpected reply */
1518 return NT_STATUS_RETRY;
1521 oplock_break = false;
1523 if (mid == 0xffff) {
1525 * Paranoia checks that this is really an oplock break request.
1527 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1528 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1529 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1530 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1531 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1533 if (!oplock_break) {
1534 /* Dump unexpected reply */
1535 return NT_STATUS_RETRY;
1539 req = conn->pending[i];
1540 state = tevent_req_data(req, struct smbXcli_req_state);
1542 if (!oplock_break /* oplock breaks are not signed */
1543 && !smb_signing_check_pdu(conn->smb1.signing,
1544 inbuf, state->smb1.seqnum+1)) {
1545 DEBUG(10, ("cli_check_sign_mac failed\n"));
1546 return NT_STATUS_ACCESS_DENIED;
1549 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1551 if (!NT_STATUS_IS_OK(status)) {
1552 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1553 nt_errstr(status)));
1557 cmd = CVAL(inhdr, HDR_COM);
1558 status = smb1cli_pull_raw_error(inhdr);
/* Simple (unchained) request: complete it directly. */
1560 if (state->smb1.chained_requests == NULL) {
1562 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1565 smbXcli_req_unset_pending(req);
1567 state->smb1.recv_cmd = cmd;
1568 state->smb1.recv_status = status;
/* Move ownership of inbuf under recv_iov so it lives as long as
 * the iovecs pointing into it. */
1569 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1571 state->smb1.recv_iov[0] = iov[0];
1572 state->smb1.recv_iov[1] = iov[1];
1573 state->smb1.recv_iov[2] = iov[2];
1575 if (talloc_array_length(conn->pending) == 0) {
1576 tevent_req_done(req);
1577 return NT_STATUS_OK;
1580 tevent_req_defer_callback(req, state->ev);
1581 tevent_req_done(req);
1582 return NT_STATUS_RETRY;
/* AndX chain: distribute the parsed responses over the chained
 * requests; surplus requests get NT_STATUS_REQUEST_ABORTED. */
1585 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1586 num_chained = talloc_array_length(chain);
1587 num_responses = (num_iov - 1)/2;
1589 if (num_responses > num_chained) {
1590 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1593 for (i=0; i<num_chained; i++) {
1594 size_t iov_idx = 1 + (i*2);
1595 struct iovec *cur = &iov[iov_idx];
1599 state = tevent_req_data(req, struct smbXcli_req_state);
1601 smbXcli_req_unset_pending(req);
1604 * as we finish multiple requests here
1605 * we need to defer the callbacks as
1606 * they could destroy our current stack state.
1608 tevent_req_defer_callback(req, state->ev);
1610 if (i >= num_responses) {
1611 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1615 state->smb1.recv_cmd = cmd;
1617 if (i == (num_responses - 1)) {
1619 * The last request in the chain gets the status
1621 state->smb1.recv_status = status;
1623 cmd = CVAL(cur[0].iov_base, 0);
1624 state->smb1.recv_status = NT_STATUS_OK;
1627 state->inbuf = inbuf;
1630 * Note: here we use talloc_reference() in a way
1631 * that does not expose it to the caller.
1633 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1634 if (tevent_req_nomem(inbuf_ref, req)) {
1638 /* copy the related buffers */
1639 state->smb1.recv_iov[0] = iov[0];
1640 state->smb1.recv_iov[1] = cur[0];
1641 state->smb1.recv_iov[2] = cur[1];
1643 tevent_req_done(req);
1646 return NT_STATUS_RETRY;
/*
 * smb1cli_req_recv: collect the result of an SMB1 request.
 *
 * Unpacks the received PDU from the request state into its three parts
 * (SMB header, vwv words, byte buffer) and validates the received
 * NT status / word count against the caller-supplied "expected" array.
 * Out-parameters are all optional (NULL means "not interested").
 *
 * NOTE(review): this listing appears to be an incomplete extract —
 * several lines (closing braces, some out-parameter initializers and
 * returns) are not visible here; comments below only describe what the
 * visible code demonstrates.
 */
1649 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1650 TALLOC_CTX *mem_ctx,
1651 struct iovec **piov,
1655 uint32_t *pvwv_offset,
1656 uint32_t *pnum_bytes,
1658 uint32_t *pbytes_offset,
1660 const struct smb1cli_req_expected_response *expected,
1661 size_t num_expected)
1663 struct smbXcli_req_state *state =
1664 tevent_req_data(req,
1665 struct smbXcli_req_state);
1666 NTSTATUS status = NT_STATUS_OK;
1667 struct iovec *recv_iov = NULL;
1668 uint8_t *hdr = NULL;
1670 uint32_t vwv_offset = 0;
1671 uint16_t *vwv = NULL;
1672 uint32_t num_bytes = 0;
1673 uint32_t bytes_offset = 0;
1674 uint8_t *bytes = NULL;
1676 bool found_status = false;
1677 bool found_size = false;
/* Default-initialize the optional out-parameters before any early return. */
1691 if (pvwv_offset != NULL) {
1694 if (pnum_bytes != NULL) {
1697 if (pbytes != NULL) {
1700 if (pbytes_offset != NULL) {
1703 if (pinbuf != NULL) {
/*
 * A response buffer is present: split the recv_iov triple into
 * header, vwv (word count = half the iov length) and byte buffer,
 * and compute their offsets relative to the header start.
 */
1707 if (state->inbuf != NULL) {
1708 recv_iov = state->smb1.recv_iov;
1709 hdr = (uint8_t *)recv_iov[0].iov_base;
1710 wct = recv_iov[1].iov_len/2;
1711 vwv = (uint16_t *)recv_iov[1].iov_base;
1712 vwv_offset = PTR_DIFF(vwv, hdr);
1713 num_bytes = recv_iov[2].iov_len;
1714 bytes = (uint8_t *)recv_iov[2].iov_base;
1715 bytes_offset = PTR_DIFF(bytes, hdr);
/*
 * The request itself failed (transport/internal error): that is only
 * acceptable if the caller explicitly listed this status as expected;
 * otherwise report an unexpected network error.
 */
1718 if (tevent_req_is_nterror(req, &status)) {
1719 for (i=0; i < num_expected; i++) {
1720 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1721 found_status = true;
1727 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* num_expected == 0 means: accept whatever the server returned. */
1733 if (num_expected == 0) {
1734 found_status = true;
1738 status = state->smb1.recv_status;
/*
 * Match the received status and word count against the expected
 * table; expected wct of 0 acts as a wildcard for the word count.
 */
1740 for (i=0; i < num_expected; i++) {
1741 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1745 found_status = true;
1746 if (expected[i].wct == 0) {
1751 if (expected[i].wct == wct) {
1757 if (!found_status) {
1762 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Hand the parsed response (and ownership of the iovec) to the caller. */
1766 *piov = talloc_move(mem_ctx, &recv_iov);
1778 if (pvwv_offset != NULL) {
1779 *pvwv_offset = vwv_offset;
1781 if (pnum_bytes != NULL) {
1782 *pnum_bytes = num_bytes;
1784 if (pbytes != NULL) {
1787 if (pbytes_offset != NULL) {
1788 *pbytes_offset = bytes_offset;
1790 if (pinbuf != NULL) {
1791 *pinbuf = state->inbuf;
/*
 * smb1cli_req_wct_ofs: compute the offset of the wct field of the last
 * request in an SMB1 AndX chain, by accumulating the vwv+bytes length
 * of each preceding request and rounding up to 4-byte alignment after
 * each one.
 *
 * NOTE(review): incomplete extract — the accumulator's initialization
 * and the return statement are not visible in this listing.
 */
1797 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1804 for (i=0; i<num_reqs; i++) {
1805 struct smbXcli_req_state *state;
1806 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* iov[0]/iov[1] are the NBT and SMB headers; only vwv+bytes count here. */
1807 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1808 state->smb1.iov_count-2);
/* Round up to the next 4-byte boundary between chained requests. */
1809 wct_ofs = (wct_ofs + 3) & ~3;
/*
 * smb1cli_req_chain_submit: merge several prepared SMB1 requests into
 * one AndX chain and submit them as a single PDU.
 *
 * All requests but the last must be AndX commands; their andx_cmd and
 * andx_off vwv fields are patched to point at the following request.
 * The combined iovec (one NBT header, one SMB header, then the vwv and
 * bytes of every request, with 4-byte padding between them) is handed
 * to smb1cli_req_writev_submit().
 *
 * NOTE(review): incomplete extract — some declarations, closing braces
 * and iovec bookkeeping lines are missing from this listing.
 */
1814 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1816 struct smbXcli_req_state *first_state =
1817 tevent_req_data(reqs[0],
1818 struct smbXcli_req_state);
1819 struct smbXcli_req_state *state;
1821 size_t chain_padding = 0;
1823 struct iovec *iov = NULL;
1824 struct iovec *this_iov;
/* A single request needs no chaining: submit it directly. */
1828 if (num_reqs == 1) {
1829 return smb1cli_req_writev_submit(reqs[0], first_state,
1830 first_state->smb1.iov,
1831 first_state->smb1.iov_count);
/*
 * Validate every request and size the combined iovec: each request
 * must still be in flight and provide at least NBT hdr, SMB hdr,
 * vwv and bytes (4 iovecs).
 */
1835 for (i=0; i<num_reqs; i++) {
1836 if (!tevent_req_is_in_progress(reqs[i])) {
1837 return NT_STATUS_INTERNAL_ERROR;
1840 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1842 if (state->smb1.iov_count < 4) {
1843 return NT_STATUS_INVALID_PARAMETER_MIX;
1848 * The NBT and SMB header
1861 iovlen += state->smb1.iov_count - 2;
1864 iov = talloc_zero_array(first_state, struct iovec, iovlen);
1866 return NT_STATUS_NO_MEMORY;
/* Remember the chain on the first request so responses can be matched. */
1869 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1870 first_state, reqs, sizeof(*reqs) * num_reqs);
1871 if (first_state->smb1.chained_requests == NULL) {
1873 return NT_STATUS_NO_MEMORY;
1876 wct_offset = HDR_WCT;
/* Patch the AndX linkage and build the outgoing iovec per request. */
1879 for (i=0; i<num_reqs; i++) {
1880 size_t next_padding = 0;
1883 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/*
 * Every request except the last must be an AndX command with at
 * least 2 vwv words (room for andx_cmd/andx_off).
 */
1885 if (i < num_reqs-1) {
1886 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
1887 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
1889 TALLOC_FREE(first_state->smb1.chained_requests);
1890 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Advance the wct offset past this request's vwv+bytes (+wct byte). */
1894 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
1895 state->smb1.iov_count-2) + 1;
1896 if ((wct_offset % 4) != 0) {
1897 next_padding = 4 - (wct_offset % 4);
1899 wct_offset += next_padding;
1900 vwv = state->smb1.vwv;
/* Point this request's AndX fields at the following command. */
1902 if (i < num_reqs-1) {
1903 struct smbXcli_req_state *next_state =
1904 tevent_req_data(reqs[i+1],
1905 struct smbXcli_req_state);
1906 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
1908 SSVAL(vwv+1, 0, wct_offset);
1909 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
1910 /* properly end the chain */
1911 SCVAL(vwv+0, 0, 0xff);
1912 SCVAL(vwv+0, 1, 0xff);
1918 * The NBT and SMB header
1920 this_iov[0] = state->smb1.iov[0];
1921 this_iov[1] = state->smb1.iov[1];
1925 * This one is a bit subtle. We have to add
1926 * chain_padding bytes between the requests, and we
1927 * have to also include the wct field of the
1928 * subsequent requests. We use the subsequent header
1929 * for the padding, it contains the wct field in its
1932 this_iov[0].iov_len = chain_padding+1;
1933 this_iov[0].iov_base = (void *)&state->smb1.hdr[
1934 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
1935 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
1940 * copy the words and bytes
1942 memcpy(this_iov, state->smb1.iov+2,
1943 sizeof(struct iovec) * (state->smb1.iov_count-2));
1944 this_iov += state->smb1.iov_count - 2;
1945 chain_padding = next_padding;
/* The whole chain (minus the NBT header) must fit into max_xmit. */
1948 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
1949 if (nbt_len > first_state->conn->smb1.max_xmit) {
1951 TALLOC_FREE(first_state->smb1.chained_requests);
1952 return NT_STATUS_INVALID_PARAMETER_MIX;
1955 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
1956 if (!NT_STATUS_IS_OK(status)) {
1958 TALLOC_FREE(first_state->smb1.chained_requests);
1962 return NT_STATUS_OK;
1965 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1967 return ((tevent_queue_length(conn->outgoing) != 0)
1968 || (talloc_array_length(conn->pending) != 0));
1971 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
1973 return conn->smb2.server.capabilities;
1976 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
1978 return conn->smb2.server.security_mode;
1981 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
1983 return conn->smb2.server.max_trans_size;
1986 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
1988 return conn->smb2.server.max_read_size;
1991 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
1993 return conn->smb2.server.max_write_size;
1996 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
1997 uint16_t max_credits)
1999 conn->smb2.max_credits = max_credits;
/*
 * smb2cli_req_create: build (but do not yet send) an SMB2 request.
 *
 * Fills in the fixed 64-byte SMB2 header (protocol magic, opcode,
 * flags, pid/tid/session id) and records the fixed/dynamic body
 * buffers in the request state; the message id is assigned later at
 * submit time. An optional timeout is armed on the tevent request.
 *
 * NOTE(review): incomplete extract — several parameter lines, error
 * paths and closing braces are not visible in this listing.
 */
2002 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
2003 struct tevent_context *ev,
2004 struct smbXcli_conn *conn,
2006 uint32_t additional_flags,
2007 uint32_t clear_flags,
2008 uint32_t timeout_msec,
2011 struct smbXcli_session *session,
2012 const uint8_t *fixed,
2017 struct tevent_req *req;
2018 struct smbXcli_req_state *state;
2022 req = tevent_req_create(mem_ctx, &state,
2023 struct smbXcli_req_state);
2030 state->session = session;
2033 uid = session->smb2.session_id;
2036 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
2037 if (state->smb2.recv_iov == NULL) {
/* Caller-requested flag tweaks on top of the defaults. */
2042 flags |= additional_flags;
2043 flags &= ~clear_flags;
2045 state->smb2.fixed = fixed;
2046 state->smb2.fixed_len = fixed_len;
2047 state->smb2.dyn = dyn;
2048 state->smb2.dyn_len = dyn_len;
/* Fill the static parts of the SMB2 header. */
2050 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2051 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2052 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
2053 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2054 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2055 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2056 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
/* CANCEL never gets a response of its own. */
2059 case SMB2_OP_CANCEL:
2060 state->one_way = true;
2064 * If this is a dummy request, it will have
2065 * UINT64_MAX as message id.
2066 * If we send on break acknowledgement,
2067 * this gets overwritten later.
2069 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
2073 if (timeout_msec > 0) {
2074 struct timeval endtime;
2076 endtime = timeval_current_ofs_msec(timeout_msec);
2077 if (!tevent_req_set_endtime(req, ev, endtime)) {
2085 static void smb2cli_writev_done(struct tevent_req *subreq);
2086 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2087 TALLOC_CTX *tmp_mem,
/*
 * smb2cli_req_compound_submit: send one or more prepared SMB2 requests
 * as a single (possibly compounded) PDU.
 *
 * For each request: validate connection/protocol state, compute the
 * credit charge and assign message ids, assemble the iovec
 * (header, fixed body, dynamic body, 8-byte padding between compound
 * parts), optionally sign, and mark the request pending. Finally
 * prefix the NBT length header and hand everything to writev_send().
 *
 * NOTE(review): incomplete extract — declarations, several closing
 * braces and some bookkeeping lines are missing from this listing.
 */
2090 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2093 struct smbXcli_req_state *state;
2094 struct tevent_req *subreq;
2096 int i, num_iov, nbt_len;
2099 * 1 for the nbt length
2100 * per request: HDR, fixed, dyn, padding
2101 * -1 because the last one does not need padding
2104 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2106 return NT_STATUS_NO_MEMORY;
2112 for (i=0; i<num_reqs; i++) {
2121 if (!tevent_req_is_in_progress(reqs[i])) {
2122 return NT_STATUS_INTERNAL_ERROR;
2125 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2127 if (!smbXcli_conn_is_connected(state->conn)) {
2128 return NT_STATUS_CONNECTION_DISCONNECTED;
2131 if ((state->conn->protocol != PROTOCOL_NONE) &&
2132 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2133 return NT_STATUS_REVISION_MISMATCH;
/* Guard against message-id wraparound on a long-lived connection. */
2136 avail = UINT64_MAX - state->conn->smb2.mid;
2138 return NT_STATUS_CONNECTION_ABORTED;
/*
 * With large-MTU support one credit pays for up to 64KiB of
 * dynamic payload; otherwise the charge stays at 1.
 */
2141 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2142 charge = (MAX(state->smb2.dyn_len, 1) - 1)/ 65536 + 1;
2147 charge = MAX(state->smb2.credit_charge, charge);
2149 avail = MIN(avail, state->conn->smb2.cur_credits);
2150 if (avail < charge) {
2151 return NT_STATUS_INTERNAL_ERROR;
/* Ask the server for enough credits to get back to max_credits. */
2155 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
2156 credits = state->conn->smb2.max_credits -
2157 state->conn->smb2.cur_credits;
2159 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
/* Consume the charge: advance mid and deduct from current credits. */
2163 mid = state->conn->smb2.mid;
2164 state->conn->smb2.mid += charge;
2165 state->conn->smb2.cur_credits -= charge;
2167 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2168 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
2170 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
2171 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
/* Header, fixed body and (optional) dynamic body become iovecs. */
2174 iov[num_iov].iov_base = state->smb2.hdr;
2175 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2178 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2179 iov[num_iov].iov_len = state->smb2.fixed_len;
2182 if (state->smb2.dyn != NULL) {
2183 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2184 iov[num_iov].iov_len = state->smb2.dyn_len;
2188 reqlen = sizeof(state->smb2.hdr);
2189 reqlen += state->smb2.fixed_len;
2190 reqlen += state->smb2.dyn_len;
/*
 * Compound parts other than the last are padded to an 8-byte
 * boundary and linked via the NEXT_COMMAND offset.
 */
2192 if (i < num_reqs-1) {
2193 if ((reqlen % 8) > 0) {
2194 uint8_t pad = 8 - (reqlen % 8);
2195 iov[num_iov].iov_base = state->smb2.pad;
2196 iov[num_iov].iov_len = pad;
2200 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
/* Sign this request's iovecs if the session requires signing. */
2204 if (state->session && state->session->smb2.should_sign) {
2207 status = smb2_signing_sign_pdu(state->session->smb2.signing_key,
2208 &iov[hdr_iov], num_iov - hdr_iov);
2209 if (!NT_STATUS_IS_OK(status)) {
2214 ret = smbXcli_req_set_pending(reqs[i]);
2216 return NT_STATUS_NO_MEMORY;
/* Prefix the NBT length header and submit the whole PDU. */
2220 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2221 _smb_setlen_tcp(state->length_hdr, nbt_len);
2222 iov[0].iov_base = state->length_hdr;
2223 iov[0].iov_len = sizeof(state->length_hdr);
2225 if (state->conn->dispatch_incoming == NULL) {
2226 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2229 subreq = writev_send(state, state->ev, state->conn->outgoing,
2230 state->conn->fd, false, iov, num_iov);
2231 if (subreq == NULL) {
2232 return NT_STATUS_NO_MEMORY;
2234 tevent_req_set_callback(subreq, smb2cli_writev_done, reqs[0]);
2235 return NT_STATUS_OK;
2238 void smb2cli_req_set_credit_charge(struct tevent_req *req, uint16_t charge)
2240 struct smbXcli_req_state *state =
2241 tevent_req_data(req,
2242 struct smbXcli_req_state);
2244 state->smb2.credit_charge = charge;
/*
 * smb2cli_req_send: convenience wrapper that creates a single SMB2
 * request via smb2cli_req_create() and immediately submits it as a
 * one-element compound via smb2cli_req_compound_submit().
 *
 * NOTE(review): incomplete extract — some parameter lines and the
 * final return are not visible in this listing.
 */
2247 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2248 struct tevent_context *ev,
2249 struct smbXcli_conn *conn,
2251 uint32_t additional_flags,
2252 uint32_t clear_flags,
2253 uint32_t timeout_msec,
2256 struct smbXcli_session *session,
2257 const uint8_t *fixed,
2262 struct tevent_req *req;
2265 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2266 additional_flags, clear_flags,
2269 fixed, fixed_len, dyn, dyn_len);
/* Creation may already have failed/finished (e.g. bad parameters). */
2273 if (!tevent_req_is_in_progress(req)) {
2274 return tevent_req_post(req, ev);
2276 status = smb2cli_req_compound_submit(&req, 1);
2277 if (tevent_req_nterror(req, status)) {
2278 return tevent_req_post(req, ev);
/*
 * smb2cli_writev_done: completion callback for the writev of an SMB2
 * PDU. On a write failure the whole connection is torn down, which
 * notifies every pending request with the mapped NT status.
 *
 * NOTE(review): incomplete extract — the success path after the error
 * branch is not visible in this listing.
 */
2283 static void smb2cli_writev_done(struct tevent_req *subreq)
2285 struct tevent_req *req =
2286 tevent_req_callback_data(subreq,
2288 struct smbXcli_req_state *state =
2289 tevent_req_data(req,
2290 struct smbXcli_req_state);
2294 nwritten = writev_recv(subreq, &err);
2295 TALLOC_FREE(subreq);
2296 if (nwritten == -1) {
2297 /* here, we need to notify all pending requests */
2298 NTSTATUS status = map_nt_error_from_unix_common(err);
2299 smbXcli_conn_disconnect(state->conn, status);
/*
 * smb2cli_inbuf_parse_compound: split a received (possibly compounded)
 * SMB2 PDU into iovec triples.
 *
 * Walks the buffer following each header's NEXT_COMMAND offset and
 * emits three iovecs per embedded PDU: the 64-byte header, the fixed
 * body (StructureSize with the low "dynamic present" bit masked off)
 * and the remaining dynamic part.
 *
 * NOTE(review): incomplete extract — declarations, several goto/error
 * lines and closing braces are missing from this listing.
 */
2304 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2305 struct iovec **piov, int *pnum_iov)
2315 iov = talloc_array(mem_ctx, struct iovec, num_iov);
2317 return NT_STATUS_NO_MEMORY;
2320 buflen = smb_len_tcp(buf);
2322 first_hdr = buf + NBT_HDR_SIZE;
2324 while (taken < buflen) {
2325 size_t len = buflen - taken;
2326 uint8_t *hdr = first_hdr + taken;
2329 size_t next_command_ofs;
2331 struct iovec *iov_tmp;
2334 * We need the header plus the body length field
2337 if (len < SMB2_HDR_BODY + 2) {
2338 DEBUG(10, ("%d bytes left, expected at least %d\n",
2339 (int)len, SMB2_HDR_BODY));
/* Sanity-check the protocol magic and the fixed header length. */
2342 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2343 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2347 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2348 DEBUG(10, ("Got HDR len %d, expected %d\n",
2349 SVAL(hdr, 4), SMB2_HDR_BODY));
2354 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2355 body_size = SVAL(hdr, SMB2_HDR_BODY);
/*
 * A non-zero NEXT_COMMAND bounds this PDU; it must leave room
 * for at least a header plus the 2-byte body length field.
 */
2357 if (next_command_ofs != 0) {
2358 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2361 if (next_command_ofs > full_size) {
2364 full_size = next_command_ofs;
2366 if (body_size < 2) {
/* Mask off the low bit of StructureSize to get the fixed body size. */
2369 body_size &= 0xfffe;
2371 if (body_size > (full_size - SMB2_HDR_BODY)) {
2375 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2377 if (iov_tmp == NULL) {
2379 return NT_STATUS_NO_MEMORY;
2382 cur = &iov[num_iov];
/* Emit the header / fixed-body / dynamic-body triple for this PDU. */
2385 cur[0].iov_base = hdr;
2386 cur[0].iov_len = SMB2_HDR_BODY;
2387 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2388 cur[1].iov_len = body_size;
2389 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2390 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2396 *pnum_iov = num_iov;
2397 return NT_STATUS_OK;
2401 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2404 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2407 size_t num_pending = talloc_array_length(conn->pending);
2410 for (i=0; i<num_pending; i++) {
2411 struct tevent_req *req = conn->pending[i];
2412 struct smbXcli_req_state *state =
2413 tevent_req_data(req,
2414 struct smbXcli_req_state);
2416 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * smb2cli_conn_dispatch_incoming: match a received (possibly
 * compounded) SMB2 PDU against the pending requests and complete them.
 *
 * Per embedded response: account granted credits, find the pending
 * request by message id, validate opcode/flags, handle async interim
 * responses (STATUS_PENDING), enforce and verify signing, then hand
 * the three response iovecs to the request and fire its callback.
 * Returns NT_STATUS_RETRY while more responses are expected.
 *
 * NOTE(review): incomplete extract — many closing braces, continue/
 * return lines and some declarations are missing from this listing;
 * the comments describe only the visible logic.
 */
2423 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2424 TALLOC_CTX *tmp_mem,
2427 struct tevent_req *req;
2428 struct smbXcli_req_state *state = NULL;
2433 struct smbXcli_session *last_session = NULL;
2435 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2437 if (!NT_STATUS_IS_OK(status)) {
/* Each response occupies three iovecs: header, fixed body, dyn body. */
2441 for (i=0; i<num_iov; i+=3) {
2442 uint8_t *inbuf_ref = NULL;
2443 struct iovec *cur = &iov[i];
2444 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2445 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2446 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2447 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2448 uint16_t req_opcode;
2450 uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT);
2451 uint32_t new_credits;
2452 struct smbXcli_session *session = NULL;
2453 const DATA_BLOB *signing_key = NULL;
/* Account the credits granted by this response; reject overflow. */
2455 new_credits = conn->smb2.cur_credits;
2456 new_credits += credits;
2457 if (new_credits > UINT16_MAX) {
2458 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2460 conn->smb2.cur_credits += credits;
2462 req = smb2cli_conn_find_pending(conn, mid);
2464 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2466 state = tevent_req_data(req, struct smbXcli_req_state);
/* The response must match the request's opcode and be a reply. */
2468 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2469 if (opcode != req_opcode) {
2470 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2472 req_flags = SVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2474 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2475 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Async interim response: remember the async id on the request
 * and keep waiting for the real response.
 */
2478 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
2479 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2480 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2481 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2484 * async interim responses are not signed,
2485 * even if the SMB2_HDR_FLAG_SIGNED flag
2488 req_flags |= SMB2_HDR_FLAG_ASYNC;
2489 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2490 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
/* Chained compound parts reuse the session of the previous part. */
2494 session = state->session;
2495 if (req_flags & SMB2_HDR_FLAG_CHAINED) {
2496 session = last_session;
2498 last_session = session;
/* A session that requires signing must not accept unsigned replies. */
2500 if (session && session->smb2.should_sign) {
2501 if (!(flags & SMB2_HDR_FLAG_SIGNED)) {
2502 return NT_STATUS_ACCESS_DENIED;
/*
 * Signed response: locate the session (by session id if not
 * already known) and pick up its signing key.
 */
2506 if (flags & SMB2_HDR_FLAG_SIGNED) {
2507 uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID);
2509 if (session == NULL) {
2510 struct smbXcli_session *s;
2512 s = state->conn->sessions;
2513 for (; s; s = s->next) {
2514 if (s->smb2.session_id != uid) {
2523 if (session == NULL) {
2524 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2527 last_session = session;
2528 signing_key = &session->smb2.signing_key;
2531 if ((opcode == SMB2_OP_SESSSETUP) &&
2532 NT_STATUS_IS_OK(status)) {
2534 * the caller has to check the signing
2535 * as only the caller knows the correct
2541 if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
2543 * if the server returns NT_STATUS_USER_SESSION_DELETED
2544 * the response is not signed and we should
2545 * propagate the NT_STATUS_USER_SESSION_DELETED
2546 * status to the caller.
2553 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_NAME_DELETED) ||
2554 NT_STATUS_EQUAL(status, NT_STATUS_FILE_CLOSED) ||
2555 NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) {
2557 * if the server returns
2558 * NT_STATUS_NETWORK_NAME_DELETED
2559 * NT_STATUS_FILE_CLOSED
2560 * NT_STATUS_INVALID_PARAMETER
2561 * the response might not be signed
2562 * as this happens before the signing checks.
2564 * If server echos the signature (or all zeros)
2565 * we should report the status from the server
2571 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2572 state->smb2.hdr+SMB2_HDR_SIGNATURE,
2575 state->smb2.signing_skipped = true;
2581 static const uint8_t zeros[16];
2583 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2587 state->smb2.signing_skipped = true;
2594 status = smb2_signing_check_pdu(*signing_key, cur, 3);
2595 if (!NT_STATUS_IS_OK(status)) {
2597 * If the signing check fails, we disconnect
2604 smbXcli_req_unset_pending(req);
2607 * There might be more than one response
2608 * we need to defer the notifications
2610 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
2615 tevent_req_defer_callback(req, state->ev);
2619 * Note: here we use talloc_reference() in a way
2620 * that does not expose it to the caller.
2622 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2623 if (tevent_req_nomem(inbuf_ref, req)) {
2627 /* copy the related buffers */
2628 state->smb2.recv_iov[0] = cur[0];
2629 state->smb2.recv_iov[1] = cur[1];
2630 state->smb2.recv_iov[2] = cur[2];
2632 tevent_req_done(req);
2636 return NT_STATUS_RETRY;
2639 return NT_STATUS_OK;
/*
 * smb2cli_req_recv: collect the result of an SMB2 request.
 *
 * Validates the received NT status and fixed body size against the
 * caller-supplied "expected" array, rejects responses whose signing
 * was skipped unless the caller accepts error statuses, and hands the
 * response iovec triple to the caller.
 *
 * NOTE(review): incomplete extract — declarations, some closing braces
 * and the final return are not visible in this listing.
 */
2642 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2643 struct iovec **piov,
2644 const struct smb2cli_req_expected_response *expected,
2645 size_t num_expected)
2647 struct smbXcli_req_state *state =
2648 tevent_req_data(req,
2649 struct smbXcli_req_state);
2652 bool found_status = false;
2653 bool found_size = false;
/*
 * Transport/internal failure: only acceptable if the caller listed
 * this status as expected.
 */
2660 if (tevent_req_is_nterror(req, &status)) {
2661 for (i=0; i < num_expected; i++) {
2662 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2663 found_status = true;
2669 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* num_expected == 0 means: accept whatever the server returned. */
2675 if (num_expected == 0) {
2676 found_status = true;
2680 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2681 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
/* body_size of 0 in the expected entry acts as a wildcard. */
2683 for (i=0; i < num_expected; i++) {
2684 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2688 found_status = true;
2689 if (expected[i].body_size == 0) {
2694 if (expected[i].body_size == body_size) {
2700 if (!found_status) {
/*
 * If signing verification was skipped, only let clear server
 * error statuses through; everything else is treated as tampering.
 */
2704 if (state->smb2.signing_skipped) {
2705 if (num_expected > 0) {
2706 return NT_STATUS_ACCESS_DENIED;
2708 if (!NT_STATUS_IS_ERR(status)) {
2709 return NT_STATUS_ACCESS_DENIED;
2714 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2718 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
/*
 * Dialect strings offered in an SMB1 negprot, ordered from oldest to
 * newest; the server replies with the index of the dialect it picked.
 * The "SMB 2.???" wildcard entry lets an SMB1 negprot upgrade to SMB2.
 */
2724 static const struct {
2725 enum protocol_types proto;
2726 const char *smb1_name;
2727 } smb1cli_prots[] = {
2728 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
2729 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
2730 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
2731 {PROTOCOL_LANMAN1, "LANMAN1.0"},
2732 {PROTOCOL_LANMAN2, "LM1.2X002"},
2733 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
2734 {PROTOCOL_LANMAN2, "LANMAN2.1"},
2735 {PROTOCOL_LANMAN2, "Samba"},
2736 {PROTOCOL_NT1, "NT LANMAN 1.0"},
2737 {PROTOCOL_NT1, "NT LM 0.12"},
2738 {PROTOCOL_SMB2_02, "SMB 2.002"},
2739 {PROTOCOL_SMB2_10, "SMB 2.???"},
/*
 * Mapping of internal protocol levels to the SMB2 dialect revision
 * numbers sent on the wire in an SMB2 negotiate request.
 */
2742 static const struct {
2743 enum protocol_types proto;
2744 uint16_t smb2_dialect;
2745 } smb2cli_prots[] = {
2746 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
2747 {PROTOCOL_SMB2_10, SMB2_DIALECT_REVISION_210},
2748 {PROTOCOL_SMB2_22, SMB2_DIALECT_REVISION_222},
/*
 * Per-request state of a protocol negotiation: the connection being
 * negotiated, the event context, the overall timeout, the acceptable
 * protocol range, and a scratch buffer for the SMB2 dialect list
 * (two bytes per entry of smb2cli_prots).
 */
2751 struct smbXcli_negprot_state {
2752 struct smbXcli_conn *conn;
2753 struct tevent_context *ev;
2754 uint32_t timeout_msec;
2755 enum protocol_types min_protocol;
2756 enum protocol_types max_protocol;
2760 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
2764 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
2765 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
2766 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
2767 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
2768 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
2769 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
/*
 * smbXcli_negprot_send: start protocol negotiation on a connection.
 *
 * Depending on the [min_protocol, max_protocol] range this sends a
 * pure SMB1 negprot, a pure SMB2 negotiate, or an SMB1 negprot that
 * offers SMB2 dialects and can be answered by either protocol.
 *
 * NOTE(review): incomplete extract — some assignments, closing braces
 * and the final return are not visible in this listing.
 */
2773 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
2774 struct tevent_context *ev,
2775 struct smbXcli_conn *conn,
2776 uint32_t timeout_msec,
2777 enum protocol_types min_protocol,
2778 enum protocol_types max_protocol)
2780 struct tevent_req *req, *subreq;
2781 struct smbXcli_negprot_state *state;
2783 req = tevent_req_create(mem_ctx, &state,
2784 struct smbXcli_negprot_state);
2790 state->timeout_msec = timeout_msec;
2791 state->min_protocol = min_protocol;
2792 state->max_protocol = max_protocol;
/* Reject an unusable protocol range up front. */
2794 if (min_protocol == PROTOCOL_NONE) {
2795 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2796 return tevent_req_post(req, ev);
2799 if (max_protocol == PROTOCOL_NONE) {
2800 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2801 return tevent_req_post(req, ev);
2804 if (min_protocol > max_protocol) {
2805 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2806 return tevent_req_post(req, ev);
/* SMB1-only range: plain SMB1 negprot with the SMB1 dispatcher. */
2809 if ((min_protocol < PROTOCOL_SMB2_02) &&
2810 (max_protocol < PROTOCOL_SMB2_02)) {
2814 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
2816 subreq = smbXcli_negprot_smb1_subreq(state);
2817 if (tevent_req_nomem(subreq, req)) {
2818 return tevent_req_post(req, ev);
2820 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
/* SMB2-only range: native SMB2 negotiate with the SMB2 dispatcher. */
2824 if ((min_protocol >= PROTOCOL_SMB2_02) &&
2825 (max_protocol >= PROTOCOL_SMB2_02)) {
2829 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2831 subreq = smbXcli_negprot_smb2_subreq(state);
2832 if (tevent_req_nomem(subreq, req)) {
2833 return tevent_req_post(req, ev);
2835 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
2840 * We send an SMB1 negprot with the SMB2 dialects
2841 * and expect a SMB1 or a SMB2 response.
2843 * smbXcli_negprot_dispatch_incoming() will fix the
2844 * callback to match protocol of the response.
2846 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
2848 subreq = smbXcli_negprot_smb1_subreq(state);
2849 if (tevent_req_nomem(subreq, req)) {
2850 return tevent_req_post(req, ev);
2852 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
2856 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
2858 struct tevent_req *req =
2859 tevent_req_callback_data(subreq,
2864 * we just want the low level error
2866 status = tevent_req_simple_recv_ntstatus(subreq);
2867 TALLOC_FREE(subreq);
2868 if (tevent_req_nterror(req, status)) {
2872 /* this should never happen */
2873 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
/*
 * smbXcli_negprot_smb1_subreq: build the SMB1 SMBnegprot request.
 *
 * Appends one dialect entry per smb1cli_prots row inside the
 * configured [min_protocol, max_protocol] range (a format marker byte
 * followed by the NUL-terminated dialect string) and sends the request
 * with no vwv words.
 *
 * NOTE(review): incomplete extract — some declarations, error paths
 * and argument lines of the final call are not visible here.
 */
2876 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
2879 DATA_BLOB bytes = data_blob_null;
2883 /* setup the protocol strings */
2884 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
2888 if (smb1cli_prots[i].proto < state->min_protocol) {
2892 if (smb1cli_prots[i].proto > state->max_protocol) {
2896 ok = data_blob_append(state, &bytes, &c, sizeof(c));
2902 * We know it is already ascii and
2903 * we want NULL termination.
2905 ok = data_blob_append(state, &bytes,
2906 smb1cli_prots[i].smb1_name,
2907 strlen(smb1cli_prots[i].smb1_name)+1);
2913 smb1cli_req_flags(state->max_protocol,
2914 state->conn->smb1.client.capabilities,
2919 return smb1cli_req_send(state, state->ev, state->conn,
2923 state->timeout_msec,
2924 0xFFFE, 0, 0, /* pid, tid, uid */
2925 0, NULL, /* wct, vwv */
2926 bytes.length, bytes.data);
2929 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
2931 struct tevent_req *req =
2932 tevent_req_callback_data(subreq,
2934 struct smbXcli_negprot_state *state =
2935 tevent_req_data(req,
2936 struct smbXcli_negprot_state);
2937 struct smbXcli_conn *conn = state->conn;
2938 struct iovec *recv_iov = NULL;
2947 size_t num_prots = 0;
2949 uint32_t client_capabilities = conn->smb1.client.capabilities;
2950 uint32_t both_capabilities;
2951 uint32_t server_capabilities = 0;
2952 uint32_t capabilities;
2953 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
2954 uint32_t server_max_xmit = 0;
2956 uint32_t server_max_mux = 0;
2957 uint16_t server_security_mode = 0;
2958 uint32_t server_session_key = 0;
2959 bool server_readbraw = false;
2960 bool server_writebraw = false;
2961 bool server_lockread = false;
2962 bool server_writeunlock = false;
2963 struct GUID server_guid = GUID_zero();
2964 DATA_BLOB server_gss_blob = data_blob_null;
2965 uint8_t server_challenge[8];
2966 char *server_workgroup = NULL;
2967 char *server_name = NULL;
2968 int server_time_zone = 0;
2969 NTTIME server_system_time = 0;
2970 static const struct smb1cli_req_expected_response expected[] = {
2972 .status = NT_STATUS_OK,
2973 .wct = 0x11, /* NT1 */
2976 .status = NT_STATUS_OK,
2977 .wct = 0x0D, /* LM */
2980 .status = NT_STATUS_OK,
2981 .wct = 0x01, /* CORE */
2985 ZERO_STRUCT(server_challenge);
2987 status = smb1cli_req_recv(subreq, state,
2992 NULL, /* pvwv_offset */
2995 NULL, /* pbytes_offset */
2997 expected, ARRAY_SIZE(expected));
2998 TALLOC_FREE(subreq);
2999 if (tevent_req_nterror(req, status)) {
3003 flags = CVAL(inhdr, HDR_FLG);
3005 protnum = SVAL(vwv, 0);
3007 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3008 if (smb1cli_prots[i].proto < state->min_protocol) {
3012 if (smb1cli_prots[i].proto > state->max_protocol) {
3016 if (protnum != num_prots) {
3021 conn->protocol = smb1cli_prots[i].proto;
3025 if (conn->protocol == PROTOCOL_NONE) {
3026 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3030 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
3031 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
3032 "and the selected protocol level doesn't support it.\n"));
3033 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3037 if (flags & FLAG_SUPPORT_LOCKREAD) {
3038 server_lockread = true;
3039 server_writeunlock = true;
3042 if (conn->protocol >= PROTOCOL_NT1) {
3043 const char *client_signing = NULL;
3044 bool server_mandatory = false;
3045 bool server_allowed = false;
3046 const char *server_signing = NULL;
3051 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3056 server_security_mode = CVAL(vwv + 1, 0);
3057 server_max_mux = SVAL(vwv + 1, 1);
3058 server_max_xmit = IVAL(vwv + 3, 1);
3059 server_session_key = IVAL(vwv + 7, 1);
3060 server_time_zone = SVALS(vwv + 15, 1);
3061 server_time_zone *= 60;
3062 /* this time arrives in real GMT */
3063 server_system_time = BVAL(vwv + 11, 1);
3064 server_capabilities = IVAL(vwv + 9, 1);
3066 key_len = CVAL(vwv + 16, 1);
3068 if (server_capabilities & CAP_RAW_MODE) {
3069 server_readbraw = true;
3070 server_writebraw = true;
3072 if (server_capabilities & CAP_LOCK_AND_READ) {
3073 server_lockread = true;
3076 if (server_capabilities & CAP_EXTENDED_SECURITY) {
3077 DATA_BLOB blob1, blob2;
3079 if (num_bytes < 16) {
3080 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3084 blob1 = data_blob_const(bytes, 16);
3085 status = GUID_from_data_blob(&blob1, &server_guid);
3086 if (tevent_req_nterror(req, status)) {
3090 blob1 = data_blob_const(bytes+16, num_bytes-16);
3091 blob2 = data_blob_dup_talloc(state, blob1);
3092 if (blob1.length > 0 &&
3093 tevent_req_nomem(blob2.data, req)) {
3096 server_gss_blob = blob2;
3098 DATA_BLOB blob1, blob2;
3100 if (num_bytes < key_len) {
3101 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3105 if (key_len != 0 && key_len != 8) {
3106 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3111 memcpy(server_challenge, bytes, 8);
3114 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3115 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
3116 if (blob1.length > 0) {
3119 len = utf16_len_n(blob1.data,
3123 ok = convert_string_talloc(state,
3131 status = map_nt_error_from_unix_common(errno);
3132 tevent_req_nterror(req, status);
3137 blob2.data += blob1.length;
3138 blob2.length -= blob1.length;
3139 if (blob2.length > 0) {
3142 len = utf16_len_n(blob1.data,
3146 ok = convert_string_talloc(state,
3154 status = map_nt_error_from_unix_common(errno);
3155 tevent_req_nterror(req, status);
3161 client_signing = "disabled";
3162 if (conn->allow_signing) {
3163 client_signing = "allowed";
3165 if (conn->mandatory_signing) {
3166 client_signing = "required";
3169 server_signing = "not supported";
3170 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
3171 server_signing = "supported";
3172 server_allowed = true;
3174 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
3175 server_signing = "required";
3176 server_mandatory = true;
3179 ok = smb_signing_set_negotiated(conn->smb1.signing,
3183 DEBUG(1,("cli_negprot: SMB signing is required, "
3184 "but client[%s] and server[%s] mismatch\n",
3185 client_signing, server_signing));
3186 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3190 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
3196 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3200 server_security_mode = SVAL(vwv + 1, 0);
3201 server_max_xmit = SVAL(vwv + 2, 0);
3202 server_max_mux = SVAL(vwv + 3, 0);
3203 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
3204 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
3205 server_session_key = IVAL(vwv + 6, 0);
3206 server_time_zone = SVALS(vwv + 10, 0);
3207 server_time_zone *= 60;
3208 /* this time is converted to GMT by make_unix_date */
3209 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
3210 unix_to_nt_time(&server_system_time, t);
3211 key_len = SVAL(vwv + 11, 0);
3213 if (num_bytes < key_len) {
3214 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3218 if (key_len != 0 && key_len != 8) {
3219 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3224 memcpy(server_challenge, bytes, 8);
3227 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3228 if (blob1.length > 0) {
3232 len = utf16_len_n(blob1.data,
3236 ok = convert_string_talloc(state,
3244 status = map_nt_error_from_unix_common(errno);
3245 tevent_req_nterror(req, status);
3251 /* the old core protocol */
3252 server_time_zone = get_time_zone(time(NULL));
3253 server_max_xmit = 1024;
3257 if (server_max_xmit < 1024) {
3258 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3262 if (server_max_mux < 1) {
3263 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3268 * Now calculate the negotiated capabilities
3269 * based on the mask for:
3270 * - client only flags
3271 * - flags used in both directions
3272 * - server only flags
3274 both_capabilities = client_capabilities & server_capabilities;
3275 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3276 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3277 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3279 max_xmit = MIN(client_max_xmit, server_max_xmit);
3281 conn->smb1.server.capabilities = server_capabilities;
3282 conn->smb1.capabilities = capabilities;
3284 conn->smb1.server.max_xmit = server_max_xmit;
3285 conn->smb1.max_xmit = max_xmit;
3287 conn->smb1.server.max_mux = server_max_mux;
3289 conn->smb1.server.security_mode = server_security_mode;
3291 conn->smb1.server.readbraw = server_readbraw;
3292 conn->smb1.server.writebraw = server_writebraw;
3293 conn->smb1.server.lockread = server_lockread;
3294 conn->smb1.server.writeunlock = server_writeunlock;
3296 conn->smb1.server.session_key = server_session_key;
3298 talloc_steal(conn, server_gss_blob.data);
3299 conn->smb1.server.gss_blob = server_gss_blob;
3300 conn->smb1.server.guid = server_guid;
3301 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3302 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3303 conn->smb1.server.name = talloc_move(conn, &server_name);
3305 conn->smb1.server.time_zone = server_time_zone;
3306 conn->smb1.server.system_time = server_system_time;
3308 tevent_req_done(req);
/*
 * Build and send the SMB2 NEGOTIATE request for the async negprot
 * engine: pack one 16-bit dialect code per protocol level in
 * [min_protocol, max_protocol], fill in the fixed request body, and
 * hand the buffers to smb2cli_req_send().
 *
 * NOTE(review): this listing is elided — the opening brace, the local
 * declarations (buf, i, and the blob/status locals of the GUID branch),
 * the loop's "continue" bodies, the dialect_count increment and the
 * closing braces are not shown here.
 */
3311 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3315 	uint16_t dialect_count = 0;
/*
 * First pass: append the dialect code of every supported protocol
 * level within the requested range to the dynamic part of the body.
 */
3317 	buf = state->smb2.dyn;
3318 	for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3319 		if (smb2cli_prots[i].proto < state->min_protocol) {
3323 		if (smb2cli_prots[i].proto > state->max_protocol) {
3327 		SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
/* Fixed part of the NEGOTIATE request body. */
3331 	buf = state->smb2.fixed;
3333 	SSVAL(buf, 2, dialect_count);
3334 	SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3335 	SSVAL(buf, 6, 0); /* Reserved */
3336 	SSVAL(buf, 8, 0); /* Capabilities */
/*
 * For SMB 2.1 or higher announce the real client GUID; the elided
 * else-branch (line 3348) zeroes the ClientGuid field instead.
 */
3337 	if (state->max_protocol >= PROTOCOL_SMB2_10) {
3341 		status = GUID_to_ndr_blob(&state->conn->smb2.client.guid,
3343 		if (!NT_STATUS_IS_OK(status)) {
3346 		memcpy(buf+12, blob.data, 16); /* ClientGuid */
3348 		memset(buf+12, 0, 16); /* ClientGuid */
3350 	SBVAL(buf, 28, 0); /* ClientStartTime */
/*
 * Send the request: no tree or session is attached yet, and the pid
 * field carries 0xFEFF (see the inline comment below).
 */
3352 	return smb2cli_req_send(state, state->ev,
3353 			state->conn, SMB2_OP_NEGPROT,
3355 			state->timeout_msec,
3356 			0xFEFF, 0, NULL, /* pid, tid, session */
3357 			state->smb2.fixed, sizeof(state->smb2.fixed),
3358 			state->smb2.dyn, dialect_count*2);
/*
 * Completion handler for the SMB2 NEGOTIATE request: validate the
 * response, map the server's DialectRevision back to a protocol level,
 * handle the SMB2_DIALECT_REVISION_2FF wildcard (answer to an SMB1
 * negprot) by re-sending a pure SMB2 negprot, and record the server's
 * negotiated parameters on the connection.
 *
 * NOTE(review): this listing is elided — opening/closing braces,
 * several local declarations (status, iov, body, blob, i), the
 * "continue"/"break" loop bodies and the early "return" statements of
 * the error paths are not shown here.
 */
3361 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3363 	struct tevent_req *req =
3364 		tevent_req_callback_data(subreq,
3366 	struct smbXcli_negprot_state *state =
3367 		tevent_req_data(req,
3368 		struct smbXcli_negprot_state);
3369 	struct smbXcli_conn *conn = state->conn;
3370 	size_t security_offset, security_length;
3376 	uint16_t dialect_revision;
/* Only a plain NT_STATUS_OK response body is acceptable here. */
3377 	static const struct smb2cli_req_expected_response expected[] = {
3379 		.status = NT_STATUS_OK,
3384 	status = smb2cli_req_recv(subreq, state, &iov,
3385 				  expected, ARRAY_SIZE(expected));
3386 	TALLOC_FREE(subreq);
3387 	if (tevent_req_nterror(req, status)) {
/* iov[1] is the fixed response body; DialectRevision at offset 4. */
3391 	body = (uint8_t *)iov[1].iov_base;
3393 	dialect_revision = SVAL(body, 4);
/* Find the protocol level matching the server's chosen dialect. */
3395 	for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3396 		if (smb2cli_prots[i].proto < state->min_protocol) {
3400 		if (smb2cli_prots[i].proto > state->max_protocol) {
3404 		if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3408 		conn->protocol = smb2cli_prots[i].proto;
3412 	if (conn->protocol == PROTOCOL_NONE) {
3413 		if (state->min_protocol >= PROTOCOL_SMB2_02) {
3414 			tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/*
 * The 0x02FF wildcard is the only legal "no dialect yet" answer; it
 * means our multi-protocol SMB1 negprot was answered by an SMB2
 * server and we must negotiate the dialect with a real SMB2 negprot.
 */
3418 		if (dialect_revision != SMB2_DIALECT_REVISION_2FF) {
3419 			tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3423 		/* make sure we do not loop forever */
3424 		state->min_protocol = PROTOCOL_SMB2_02;
3427 		 * send a SMB2 negprot, in order to negotiate
3428 		 * the SMB2 dialect. This needs to use the
3431 		state->conn->smb2.mid = 1;
3432 		subreq = smbXcli_negprot_smb2_subreq(state);
3433 		if (tevent_req_nomem(subreq, req)) {
3436 		tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
/* Record the server's negotiated parameters on the connection. */
3440 	conn->smb2.server.security_mode = SVAL(body, 2);
3442 	blob = data_blob_const(body + 8, 16);
3443 	status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3444 	if (tevent_req_nterror(req, status)) {
3448 	conn->smb2.server.capabilities = IVAL(body, 24);
3449 	conn->smb2.server.max_trans_size= IVAL(body, 28);
3450 	conn->smb2.server.max_read_size = IVAL(body, 32);
3451 	conn->smb2.server.max_write_size= IVAL(body, 36);
3452 	conn->smb2.server.system_time = BVAL(body, 40);
3453 	conn->smb2.server.start_time = BVAL(body, 48);
/*
 * The security buffer must start directly after the fixed body and
 * fit inside the received dynamic part (iov[2]).
 */
3455 	security_offset = SVAL(body, 56);
3456 	security_length = SVAL(body, 58);
3458 	if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3459 		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3463 	if (security_length > iov[2].iov_len) {
3464 		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* Keep a connection-owned copy of the server's GSS/SPNEGO blob. */
3468 	conn->smb2.server.gss_blob = data_blob_talloc(conn,
3471 	if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3475 	tevent_req_done(req);
/*
 * Dispatcher for the very first PDU received while a negprot is in
 * flight: inspect the protocol magic of the incoming buffer, rewire
 * the pending request's callback to the matching SMB1 or SMB2
 * completion handler, install the permanent per-protocol dispatch
 * function on the connection, and forward the PDU to it.
 *
 * NOTE(review): this listing is elided — the opening brace, the
 * "case" labels of the switch (presumably the SMB1 and SMB2 magic
 * values), a "break"/"default" structure and the closing braces are
 * not shown here.
 */
3478 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3479 						  TALLOC_CTX *tmp_mem,
3482 	size_t num_pending = talloc_array_length(conn->pending);
3483 	struct tevent_req *subreq;
3484 	struct smbXcli_req_state *substate;
3485 	struct tevent_req *req;
3486 	struct smbXcli_negprot_state *state;
/* The 4 bytes after the NBT length header identify the protocol. */
3487 	uint32_t protocol_magic = IVAL(inbuf, 4);
/* During negprot exactly one request may be outstanding. */
3489 	if (num_pending != 1) {
3490 		return NT_STATUS_INTERNAL_ERROR;
3493 	subreq = conn->pending[0];
3494 	substate = tevent_req_data(subreq, struct smbXcli_req_state);
3495 	req = tevent_req_callback_data(subreq, struct tevent_req);
3496 	state = tevent_req_data(req, struct smbXcli_negprot_state);
3498 	switch (protocol_magic) {
/* SMB1 reply: hand over to the SMB1 dispatcher from now on. */
3500 		tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3501 		conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3502 		return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/*
 * SMB2 reply to an SMB1-style negprot: the receive iovec was set up
 * on the smb1 side of the union and has to be moved to the smb2 side
 * before the SMB2 dispatcher can use it.
 */
3505 		if (substate->smb2.recv_iov == NULL) {
3507 			 * For the SMB1 negprot we have move it.
3509 			substate->smb2.recv_iov = substate->smb1.recv_iov;
3510 			substate->smb1.recv_iov = NULL;
3513 		tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3514 		conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3515 		return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/* Anything else is not an SMB PDU at all. */
3518 	DEBUG(10, ("Got non-SMB PDU\n"));
3519 	return NT_STATUS_INVALID_NETWORK_RESPONSE;
3522 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
3524 return tevent_req_simple_recv_ntstatus(req);
/*
 * Synchronous wrapper around smbXcli_negprot_send()/_recv(): create a
 * private tevent context on a talloc stackframe, run the async negprot
 * to completion and return its status.
 *
 * NOTE(review): this listing is elided — the opening brace, the "bool
 * ok" declaration, the NULL checks with their "goto fail" bodies, the
 * "fail:" label and the TALLOC_FREE(frame)/return epilogue are not
 * shown here.
 */
3527 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
3528 			 uint32_t timeout_msec,
3529 			 enum protocol_types min_protocol,
3530 			 enum protocol_types max_protocol)
/* Everything below hangs off this frame and is freed on exit. */
3532 	TALLOC_CTX *frame = talloc_stackframe();
3533 	struct tevent_context *ev;
3534 	struct tevent_req *req;
/* Default result if any allocation below fails. */
3535 	NTSTATUS status = NT_STATUS_NO_MEMORY;
3538 	if (smbXcli_conn_has_async_calls(conn)) {
3540 		 * Can't use sync call while an async call is in flight
3542 		status = NT_STATUS_INVALID_PARAMETER_MIX;
3545 	ev = tevent_context_init(frame);
3549 	req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
3550 				   min_protocol, max_protocol);
/* Drive the event loop until the request completes. */
3554 	ok = tevent_req_poll(req, ev);
3556 		status = map_nt_error_from_unix_common(errno);
3559 	status = smbXcli_negprot_recv(req);
3565 static int smbXcli_session_destructor(struct smbXcli_session *session)
3567 if (session->conn == NULL) {
3571 DLIST_REMOVE(session->conn->sessions, session);
3575 struct smbXcli_session *smbXcli_session_create(TALLOC_CTX *mem_ctx,
3576 struct smbXcli_conn *conn)
3578 struct smbXcli_session *session;
3580 session = talloc_zero(mem_ctx, struct smbXcli_session);
3581 if (session == NULL) {
3584 talloc_set_destructor(session, smbXcli_session_destructor);
3586 DLIST_ADD_END(conn->sessions, session, struct smbXcli_session *);
3587 session->conn = conn;
3592 uint8_t smb2cli_session_security_mode(struct smbXcli_session *session)
3594 struct smbXcli_conn *conn = session->conn;
3595 uint8_t security_mode = 0;
3598 return security_mode;
3601 security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
3602 if (conn->mandatory_signing) {
3603 security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
3606 return security_mode;
3609 uint64_t smb2cli_session_current_id(struct smbXcli_session *session)
3611 return session->smb2.session_id;
3614 void smb2cli_session_set_id_and_flags(struct smbXcli_session *session,
3615 uint64_t session_id,
3616 uint16_t session_flags)
3618 session->smb2.session_id = session_id;
3619 session->smb2.session_flags = session_flags;
/*
 * Install the session key after a successful SMB2 session setup:
 * verify the final (signed) session setup response with the signing
 * key, store copies of the session key and signing key on the
 * session, and decide whether outgoing PDUs should be signed.
 *
 * NOTE(review): this listing is elided — the opening brace, the
 * "NTSTATUS status;" declaration, the guard condition above line 3632
 * (presumably a NULL/validity check that yields
 * NT_STATUS_INVALID_PARAMETER_MIX), the else/closing braces and the
 * error-return of the signature check are not shown here.
 */
3622 NTSTATUS smb2cli_session_update_session_key(struct smbXcli_session *session,
3623 					    const DATA_BLOB session_key,
3624 					    const struct iovec *recv_iov)
3626 	struct smbXcli_conn *conn = session->conn;
3627 	uint16_t no_sign_flags;
3628 	DATA_BLOB signing_key;
3632 		return NT_STATUS_INVALID_PARAMETER_MIX;
/* Guest and anonymous (NULL) sessions never sign. */
3635 	no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
3637 	if (session->smb2.session_flags & no_sign_flags) {
3638 		session->smb2.should_sign = false;
3639 		return NT_STATUS_OK;
/* Prefer an already-established signing key over the fresh one. */
3642 	if (session->smb2.signing_key.length > 0) {
3643 		signing_key = session->smb2.signing_key;
3645 		signing_key = session_key;
/*
 * Verify the server's signature on the received response (header +
 * body in the first 3 iovec entries) before trusting the key.
 */
3648 	status = smb2_signing_check_pdu(signing_key, recv_iov, 3);
3649 	if (!NT_STATUS_IS_OK(status)) {
/* Keep a session-owned copy of the session key. */
3653 	session->smb2.session_key = data_blob_dup_talloc(session, session_key);
3654 	if (session->smb2.session_key.data == NULL) {
3655 		return NT_STATUS_NO_MEMORY;
/* A signing key from an earlier setup round stays in place. */
3658 	if (session->smb2.signing_key.length > 0) {
3659 		return NT_STATUS_OK;
3662 	session->smb2.signing_key = data_blob_dup_talloc(session, signing_key);
3663 	if (session->smb2.signing_key.data == NULL) {
3664 		return NT_STATUS_NO_MEMORY;
/*
 * Sign if either side wants it: client desire, or the server's
 * negotiated security mode requires it.
 */
3667 	session->smb2.should_sign = false;
3669 	if (conn->desire_signing) {
3670 		session->smb2.should_sign = true;
3673 	if (conn->smb2.server.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
3674 		session->smb2.should_sign = true;
3677 	return NT_STATUS_OK;