2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "../libcli/smb/smb_common.h"
28 #include "../libcli/smb/smb_seal.h"
29 #include "../libcli/smb/smb_signing.h"
30 #include "../libcli/smb/read_smb.h"
31 #include "smbXcli_base.h"
32 #include "librpc/ndr/libndr.h"
36 struct sockaddr_storage local_ss;
37 struct sockaddr_storage remote_ss;
38 const char *remote_name;
40 struct tevent_queue *outgoing;
41 struct tevent_req **pending;
42 struct tevent_req *read_smb_req;
44 enum protocol_types protocol;
47 bool mandatory_signing;
50 * The incoming dispatch function should return:
51 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
52 * - NT_STATUS_OK, if no more processing is desired, e.g.
53 * the dispatch function called
55 * - All other return values disconnect the connection.
57 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
63 uint32_t capabilities;
68 uint32_t capabilities;
71 uint16_t security_mode;
80 const char *workgroup;
86 uint32_t capabilities;
91 struct smb_signing_state *signing;
92 struct smb_trans_enc_state *trans_enc;
97 uint16_t security_mode;
102 uint32_t capabilities;
103 uint16_t security_mode;
105 uint32_t max_trans_size;
106 uint32_t max_read_size;
107 uint32_t max_write_size;
114 uint16_t cur_credits;
115 uint16_t max_credits;
119 struct smbXcli_req_state {
120 struct tevent_context *ev;
121 struct smbXcli_conn *conn;
123 uint8_t length_hdr[4];
130 /* Space for the header including the wct */
131 uint8_t hdr[HDR_VWV];
134 * For normal requests, smb1cli_req_send chooses a mid.
135 * SecondaryV trans requests need to use the mid of the primary
136 * request, so we need a place to store it.
137 * Assume it is set if != 0.
142 uint8_t bytecount_buf[2];
144 #define MAX_SMB_IOV 5
145 /* length_hdr, hdr, words, byte_count, buffers */
146 struct iovec iov[1 + 3 + MAX_SMB_IOV];
150 struct tevent_req **chained_requests;
153 NTSTATUS recv_status;
154 /* always an array of 3 talloc elements */
155 struct iovec *recv_iov;
159 const uint8_t *fixed;
165 uint8_t pad[7]; /* padding space for compounding */
167 /* always an array of 3 talloc elements */
168 struct iovec *recv_iov;
170 uint16_t credit_charge;
174 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
177 * NT_STATUS_OK, means we do not notify the callers
179 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
181 if (conn->smb1.trans_enc) {
182 common_free_encryption_state(&conn->smb1.trans_enc);
188 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
190 const char *remote_name,
191 enum smb_signing_setting signing_state,
192 uint32_t smb1_capabilities,
193 struct GUID *client_guid)
195 struct smbXcli_conn *conn = NULL;
197 struct sockaddr *sa = NULL;
201 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
206 conn->remote_name = talloc_strdup(conn, remote_name);
207 if (conn->remote_name == NULL) {
213 ss = (void *)&conn->local_ss;
214 sa = (struct sockaddr *)ss;
215 sa_length = sizeof(conn->local_ss);
216 ret = getsockname(fd, sa, &sa_length);
220 ss = (void *)&conn->remote_ss;
221 sa = (struct sockaddr *)ss;
222 sa_length = sizeof(conn->remote_ss);
223 ret = getpeername(fd, sa, &sa_length);
228 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
229 if (conn->outgoing == NULL) {
232 conn->pending = NULL;
234 conn->protocol = PROTOCOL_NONE;
236 switch (signing_state) {
237 case SMB_SIGNING_OFF:
239 conn->allow_signing = false;
240 conn->desire_signing = false;
241 conn->mandatory_signing = false;
243 case SMB_SIGNING_DEFAULT:
244 case SMB_SIGNING_IF_REQUIRED:
245 /* if the server requires it */
246 conn->allow_signing = true;
247 conn->desire_signing = false;
248 conn->mandatory_signing = false;
250 case SMB_SIGNING_REQUIRED:
252 conn->allow_signing = true;
253 conn->desire_signing = true;
254 conn->mandatory_signing = true;
258 conn->smb1.client.capabilities = smb1_capabilities;
259 conn->smb1.client.max_xmit = UINT16_MAX;
261 conn->smb1.capabilities = conn->smb1.client.capabilities;
262 conn->smb1.max_xmit = 1024;
266 /* initialise signing */
267 conn->smb1.signing = smb_signing_init(conn,
269 conn->desire_signing,
270 conn->mandatory_signing);
271 if (!conn->smb1.signing) {
275 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
276 if (conn->mandatory_signing) {
277 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
280 conn->smb2.client.guid = *client_guid;
283 conn->smb2.cur_credits = 1;
284 conn->smb2.max_credits = 0;
286 talloc_set_destructor(conn, smbXcli_conn_destructor);
294 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
300 if (conn->fd == -1) {
307 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
309 return conn->protocol;
312 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
314 if (conn->protocol >= PROTOCOL_SMB2_02) {
318 if (conn->smb1.capabilities & CAP_UNICODE) {
325 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
327 set_socket_options(conn->fd, options);
330 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
332 return &conn->local_ss;
335 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
337 return &conn->remote_ss;
340 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
342 return conn->remote_name;
345 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
347 if (conn->protocol >= PROTOCOL_SMB2_02) {
354 return conn->smb1.server.max_mux;
357 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
359 if (conn->protocol >= PROTOCOL_SMB2_02) {
360 return conn->smb2.server.system_time;
363 return conn->smb1.server.system_time;
366 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
368 if (conn->protocol >= PROTOCOL_SMB2_02) {
369 return &conn->smb2.server.gss_blob;
372 return &conn->smb1.server.gss_blob;
375 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
377 if (conn->protocol >= PROTOCOL_SMB2_02) {
378 return &conn->smb2.server.guid;
381 return &conn->smb1.server.guid;
384 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
386 return conn->smb1.capabilities;
389 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
391 return conn->smb1.max_xmit;
394 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
396 return conn->smb1.server.session_key;
399 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
401 return conn->smb1.server.challenge;
404 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
406 return conn->smb1.server.security_mode;
409 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
411 return conn->smb1.server.time_zone;
414 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
415 const DATA_BLOB user_session_key,
416 const DATA_BLOB response)
418 return smb_signing_activate(conn->smb1.signing,
423 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
424 const uint8_t *buf, uint32_t seqnum)
426 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
429 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
431 return smb_signing_is_active(conn->smb1.signing);
434 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
435 struct smb_trans_enc_state *es)
437 /* Replace the old state, if any. */
438 if (conn->smb1.trans_enc) {
439 common_free_encryption_state(&conn->smb1.trans_enc);
441 conn->smb1.trans_enc = es;
444 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
446 return common_encryption_on(conn->smb1.trans_enc);
450 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
452 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
453 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
455 if (NT_STATUS_IS_OK(status)) {
459 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
463 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
467 * Is the SMB command able to hold an AND_X successor
468 * @param[in] cmd The SMB command in question
469 * @retval Can we add a chained request after "cmd"?
471 bool smb1cli_is_andx_req(uint8_t cmd)
491 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
493 size_t num_pending = talloc_array_length(conn->pending);
499 result = conn->smb1.mid++;
500 if ((result == 0) || (result == 0xffff)) {
504 for (i=0; i<num_pending; i++) {
505 if (result == smb1cli_req_mid(conn->pending[i])) {
510 if (i == num_pending) {
516 void smbXcli_req_unset_pending(struct tevent_req *req)
518 struct smbXcli_req_state *state =
520 struct smbXcli_req_state);
521 struct smbXcli_conn *conn = state->conn;
522 size_t num_pending = talloc_array_length(conn->pending);
525 if (state->smb1.mid != 0) {
527 * This is a [nt]trans[2] request which waits
528 * for more than one reply.
533 talloc_set_destructor(req, NULL);
535 if (num_pending == 1) {
537 * The pending read_smb tevent_req is a child of
538 * conn->pending. So if nothing is pending anymore, we need to
539 * delete the socket read fde.
541 TALLOC_FREE(conn->pending);
542 conn->read_smb_req = NULL;
546 for (i=0; i<num_pending; i++) {
547 if (req == conn->pending[i]) {
551 if (i == num_pending) {
553 * Something's seriously broken. Just returning here is the
554 * right thing nevertheless, the point of this routine is to
555 * remove ourselves from conn->pending.
561 * Remove ourselves from the conn->pending array
563 for (; i < (num_pending - 1); i++) {
564 conn->pending[i] = conn->pending[i+1];
568 * No NULL check here, we're shrinking by sizeof(void *), and
569 * talloc_realloc just adjusts the size for this.
571 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
576 static int smbXcli_req_destructor(struct tevent_req *req)
578 struct smbXcli_req_state *state =
580 struct smbXcli_req_state);
583 * Make sure we really remove it from
584 * the pending array on destruction.
587 smbXcli_req_unset_pending(req);
591 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
593 bool smbXcli_req_set_pending(struct tevent_req *req)
595 struct smbXcli_req_state *state =
597 struct smbXcli_req_state);
598 struct smbXcli_conn *conn;
599 struct tevent_req **pending;
604 if (!smbXcli_conn_is_connected(conn)) {
608 num_pending = talloc_array_length(conn->pending);
610 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
612 if (pending == NULL) {
615 pending[num_pending] = req;
616 conn->pending = pending;
617 talloc_set_destructor(req, smbXcli_req_destructor);
619 if (!smbXcli_conn_receive_next(conn)) {
621 * the caller should notify the current request
623 * And all other pending requests get notified
624 * by smbXcli_conn_disconnect().
626 smbXcli_req_unset_pending(req);
627 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
634 static void smbXcli_conn_received(struct tevent_req *subreq);
636 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
638 size_t num_pending = talloc_array_length(conn->pending);
639 struct tevent_req *req;
640 struct smbXcli_req_state *state;
642 if (conn->read_smb_req != NULL) {
646 if (num_pending == 0) {
647 if (conn->smb2.mid < UINT64_MAX) {
648 /* no more pending requests, so we are done for now */
653 * If there are no more SMB2 requests possible,
654 * because we are out of message ids,
655 * we need to disconnect.
657 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
661 req = conn->pending[0];
662 state = tevent_req_data(req, struct smbXcli_req_state);
665 * We're the first ones, add the read_smb request that waits for the
666 * answer from the server
668 conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
669 if (conn->read_smb_req == NULL) {
672 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
676 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
678 if (conn->fd != -1) {
684 * Cancel all pending requests. We do not do a for-loop walking
685 * conn->pending because that array changes in
686 * smbXcli_req_unset_pending.
688 while (talloc_array_length(conn->pending) > 0) {
689 struct tevent_req *req;
690 struct smbXcli_req_state *state;
691 struct tevent_req **chain;
695 req = conn->pending[0];
696 state = tevent_req_data(req, struct smbXcli_req_state);
698 if (state->smb1.chained_requests == NULL) {
700 * We're dead. No point waiting for trans2
705 smbXcli_req_unset_pending(req);
707 if (NT_STATUS_IS_OK(status)) {
708 /* do not notify the callers */
713 * we need to defer the callback, because we may notify
714 * more then one caller.
716 tevent_req_defer_callback(req, state->ev);
717 tevent_req_nterror(req, status);
721 chain = talloc_move(conn, &state->smb1.chained_requests);
722 num_chained = talloc_array_length(chain);
724 for (i=0; i<num_chained; i++) {
726 state = tevent_req_data(req, struct smbXcli_req_state);
729 * We're dead. No point waiting for trans2
734 smbXcli_req_unset_pending(req);
736 if (NT_STATUS_IS_OK(status)) {
737 /* do not notify the callers */
742 * we need to defer the callback, because we may notify
743 * more then one caller.
745 tevent_req_defer_callback(req, state->ev);
746 tevent_req_nterror(req, status);
753 * Fetch a smb request's mid. Only valid after the request has been sent by
754 * smb1cli_req_send().
756 uint16_t smb1cli_req_mid(struct tevent_req *req)
758 struct smbXcli_req_state *state =
760 struct smbXcli_req_state);
762 if (state->smb1.mid != 0) {
763 return state->smb1.mid;
766 return SVAL(state->smb1.hdr, HDR_MID);
769 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
771 struct smbXcli_req_state *state =
773 struct smbXcli_req_state);
775 state->smb1.mid = mid;
778 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
780 struct smbXcli_req_state *state =
782 struct smbXcli_req_state);
784 return state->smb1.seqnum;
787 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
789 struct smbXcli_req_state *state =
791 struct smbXcli_req_state);
793 state->smb1.seqnum = seqnum;
/*
 * Sum the lengths of 'count' iovec elements.
 * Returns 0 for count == 0.
 */
static size_t smbXcli_iov_len(const struct iovec *iov, int count)
{
	size_t result = 0;
	int i;

	for (i=0; i<count; i++) {
		result += iov[i].iov_len;
	}
	return result;
}
806 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
807 const struct iovec *iov,
810 size_t len = smbXcli_iov_len(iov, count);
815 buf = talloc_array(mem_ctx, uint8_t, len);
820 for (i=0; i<count; i++) {
821 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
822 copied += iov[i].iov_len;
827 static void smb1cli_req_flags(enum protocol_types protocol,
828 uint32_t smb1_capabilities,
830 uint8_t additional_flags,
833 uint16_t additional_flags2,
834 uint16_t clear_flags2,
840 if (protocol >= PROTOCOL_LANMAN1) {
841 flags |= FLAG_CASELESS_PATHNAMES;
842 flags |= FLAG_CANONICAL_PATHNAMES;
845 if (protocol >= PROTOCOL_LANMAN2) {
846 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
847 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
850 if (protocol >= PROTOCOL_NT1) {
851 flags2 |= FLAGS2_IS_LONG_NAME;
853 if (smb1_capabilities & CAP_UNICODE) {
854 flags2 |= FLAGS2_UNICODE_STRINGS;
856 if (smb1_capabilities & CAP_STATUS32) {
857 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
859 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
860 flags2 |= FLAGS2_EXTENDED_SECURITY;
864 flags |= additional_flags;
865 flags &= ~clear_flags;
866 flags2 |= additional_flags2;
867 flags2 &= ~clear_flags2;
873 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
874 struct tevent_context *ev,
875 struct smbXcli_conn *conn,
877 uint8_t additional_flags,
879 uint16_t additional_flags2,
880 uint16_t clear_flags2,
881 uint32_t timeout_msec,
885 uint8_t wct, uint16_t *vwv,
887 struct iovec *bytes_iov)
889 struct tevent_req *req;
890 struct smbXcli_req_state *state;
894 if (iov_count > MAX_SMB_IOV) {
896 * Should not happen :-)
901 req = tevent_req_create(mem_ctx, &state,
902 struct smbXcli_req_state);
909 state->smb1.recv_cmd = 0xFF;
910 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
911 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
912 if (state->smb1.recv_iov == NULL) {
917 smb1cli_req_flags(conn->protocol,
918 conn->smb1.capabilities,
927 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
928 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
929 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
930 SCVAL(state->smb1.hdr, HDR_FLG, flags);
931 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
932 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
933 SSVAL(state->smb1.hdr, HDR_TID, tid);
934 SSVAL(state->smb1.hdr, HDR_PID, pid);
935 SSVAL(state->smb1.hdr, HDR_UID, uid);
936 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
937 SSVAL(state->smb1.hdr, HDR_WCT, wct);
939 state->smb1.vwv = vwv;
941 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
943 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
944 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
945 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
946 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
947 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
948 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
949 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
950 state->smb1.iov[3].iov_len = sizeof(uint16_t);
952 if (iov_count != 0) {
953 memcpy(&state->smb1.iov[4], bytes_iov,
954 iov_count * sizeof(*bytes_iov));
956 state->smb1.iov_count = iov_count + 4;
958 if (timeout_msec > 0) {
959 struct timeval endtime;
961 endtime = timeval_current_ofs_msec(timeout_msec);
962 if (!tevent_req_set_endtime(req, ev, endtime)) {
967 switch (smb_command) {
972 state->one_way = true;
976 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
977 state->one_way = true;
985 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
986 struct iovec *iov, int iov_count,
992 * Obvious optimization: Make cli_calculate_sign_mac work with struct
993 * iovec directly. MD5Update would do that just fine.
997 return NT_STATUS_INVALID_PARAMETER_MIX;
999 if (iov[0].iov_len != NBT_HDR_SIZE) {
1000 return NT_STATUS_INVALID_PARAMETER_MIX;
1002 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1003 return NT_STATUS_INVALID_PARAMETER_MIX;
1005 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1006 return NT_STATUS_INVALID_PARAMETER_MIX;
1008 if (iov[3].iov_len != sizeof(uint16_t)) {
1009 return NT_STATUS_INVALID_PARAMETER_MIX;
1012 buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1014 return NT_STATUS_NO_MEMORY;
1017 *seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
1018 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
1019 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
1022 return NT_STATUS_OK;
1025 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1026 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1027 TALLOC_CTX *tmp_mem,
1030 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1031 struct smbXcli_req_state *state,
1032 struct iovec *iov, int iov_count)
1034 struct tevent_req *subreq;
1038 if (!smbXcli_conn_is_connected(state->conn)) {
1039 return NT_STATUS_CONNECTION_DISCONNECTED;
1042 if (state->conn->protocol > PROTOCOL_NT1) {
1043 return NT_STATUS_REVISION_MISMATCH;
1046 if (iov_count < 4) {
1047 return NT_STATUS_INVALID_PARAMETER_MIX;
1049 if (iov[0].iov_len != NBT_HDR_SIZE) {
1050 return NT_STATUS_INVALID_PARAMETER_MIX;
1052 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1053 return NT_STATUS_INVALID_PARAMETER_MIX;
1055 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1056 return NT_STATUS_INVALID_PARAMETER_MIX;
1058 if (iov[3].iov_len != sizeof(uint16_t)) {
1059 return NT_STATUS_INVALID_PARAMETER_MIX;
1062 if (state->smb1.mid != 0) {
1063 mid = state->smb1.mid;
1065 mid = smb1cli_alloc_mid(state->conn);
1067 SSVAL(iov[1].iov_base, HDR_MID, mid);
1069 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
1071 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1072 &state->smb1.seqnum);
1074 if (!NT_STATUS_IS_OK(status)) {
1079 * If we supported multiple encrytion contexts
1080 * here we'd look up based on tid.
1082 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1083 char *buf, *enc_buf;
1085 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1087 return NT_STATUS_NO_MEMORY;
1089 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1090 (char *)buf, &enc_buf);
1092 if (!NT_STATUS_IS_OK(status)) {
1093 DEBUG(0, ("Error in encrypting client message: %s\n",
1094 nt_errstr(status)));
1097 buf = (char *)talloc_memdup(state, enc_buf,
1098 smb_len_nbt(enc_buf)+4);
1101 return NT_STATUS_NO_MEMORY;
1103 iov[0].iov_base = (void *)buf;
1104 iov[0].iov_len = talloc_get_size(buf);
1108 if (state->conn->dispatch_incoming == NULL) {
1109 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1112 subreq = writev_send(state, state->ev, state->conn->outgoing,
1113 state->conn->fd, false, iov, iov_count);
1114 if (subreq == NULL) {
1115 return NT_STATUS_NO_MEMORY;
1117 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1118 return NT_STATUS_OK;
1121 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1122 struct tevent_context *ev,
1123 struct smbXcli_conn *conn,
1124 uint8_t smb_command,
1125 uint8_t additional_flags,
1126 uint8_t clear_flags,
1127 uint16_t additional_flags2,
1128 uint16_t clear_flags2,
1129 uint32_t timeout_msec,
1133 uint8_t wct, uint16_t *vwv,
1135 const uint8_t *bytes)
1137 struct tevent_req *req;
1141 iov.iov_base = discard_const_p(void, bytes);
1142 iov.iov_len = num_bytes;
1144 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1145 additional_flags, clear_flags,
1146 additional_flags2, clear_flags2,
1153 if (!tevent_req_is_in_progress(req)) {
1154 return tevent_req_post(req, ev);
1156 status = smb1cli_req_chain_submit(&req, 1);
1157 if (tevent_req_nterror(req, status)) {
1158 return tevent_req_post(req, ev);
1163 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1165 struct tevent_req *req =
1166 tevent_req_callback_data(subreq,
1168 struct smbXcli_req_state *state =
1169 tevent_req_data(req,
1170 struct smbXcli_req_state);
1174 nwritten = writev_recv(subreq, &err);
1175 TALLOC_FREE(subreq);
1176 if (nwritten == -1) {
1177 NTSTATUS status = map_nt_error_from_unix_common(err);
1178 smbXcli_conn_disconnect(state->conn, status);
1182 if (state->one_way) {
1183 state->inbuf = NULL;
1184 tevent_req_done(req);
1188 if (!smbXcli_req_set_pending(req)) {
1189 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1194 static void smbXcli_conn_received(struct tevent_req *subreq)
1196 struct smbXcli_conn *conn =
1197 tevent_req_callback_data(subreq,
1198 struct smbXcli_conn);
1199 TALLOC_CTX *frame = talloc_stackframe();
1205 if (subreq != conn->read_smb_req) {
1206 DEBUG(1, ("Internal error: cli_smb_received called with "
1207 "unexpected subreq\n"));
1208 status = NT_STATUS_INTERNAL_ERROR;
1209 smbXcli_conn_disconnect(conn, status);
1213 conn->read_smb_req = NULL;
1215 received = read_smb_recv(subreq, frame, &inbuf, &err);
1216 TALLOC_FREE(subreq);
1217 if (received == -1) {
1218 status = map_nt_error_from_unix_common(err);
1219 smbXcli_conn_disconnect(conn, status);
1224 status = conn->dispatch_incoming(conn, frame, inbuf);
1226 if (NT_STATUS_IS_OK(status)) {
1228 * We should not do any more processing
1229 * as the dispatch function called
1230 * tevent_req_done().
1233 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1235 * We got an error, so notify all pending requests
1237 smbXcli_conn_disconnect(conn, status);
1242 * We got NT_STATUS_RETRY, so we may ask for a
1243 * next incoming pdu.
1245 if (!smbXcli_conn_receive_next(conn)) {
1246 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
1250 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1251 struct iovec **piov, int *pnum_iov)
1262 buflen = smb_len_nbt(buf);
1265 hdr = buf + NBT_HDR_SIZE;
1267 if (buflen < MIN_SMB_SIZE) {
1268 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1272 * This returns iovec elements in the following order:
1287 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1289 return NT_STATUS_NO_MEMORY;
1291 iov[0].iov_base = hdr;
1292 iov[0].iov_len = HDR_WCT;
1295 cmd = CVAL(hdr, HDR_COM);
1299 size_t len = buflen - taken;
1301 struct iovec *iov_tmp;
1308 * we need at least WCT and BCC
1310 needed = sizeof(uint8_t) + sizeof(uint16_t);
1312 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1313 __location__, (int)len, (int)needed));
1318 * Now we check if the specified words are there
1320 wct = CVAL(hdr, wct_ofs);
1321 needed += wct * sizeof(uint16_t);
1323 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1324 __location__, (int)len, (int)needed));
1329 * Now we check if the specified bytes are there
1331 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1332 bcc = SVAL(hdr, bcc_ofs);
1333 needed += bcc * sizeof(uint8_t);
1335 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1336 __location__, (int)len, (int)needed));
1341 * we allocate 2 iovec structures for words and bytes
1343 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1345 if (iov_tmp == NULL) {
1347 return NT_STATUS_NO_MEMORY;
1350 cur = &iov[num_iov];
1353 cur[0].iov_len = wct * sizeof(uint16_t);
1354 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1355 cur[1].iov_len = bcc * sizeof(uint8_t);
1356 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1360 if (!smb1cli_is_andx_req(cmd)) {
1362 * If the current command does not have AndX chanining
1368 if (wct == 0 && bcc == 0) {
1370 * An empty response also ends the chain,
1371 * most likely with an error.
1377 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1378 __location__, (int)wct, (int)cmd));
1381 cmd = CVAL(cur[0].iov_base, 0);
1384 * If it is the end of the chain we are also done.
1388 wct_ofs = SVAL(cur[0].iov_base, 2);
1390 if (wct_ofs < taken) {
1391 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1393 if (wct_ofs > buflen) {
1394 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1398 * we consumed everything up to the start of the next
1404 remaining = buflen - taken;
1406 if (remaining > 0 && num_iov >= 3) {
1408 * The last DATA block gets the remaining
1409 * bytes, this is needed to support
1410 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1412 iov[num_iov-1].iov_len += remaining;
1416 *pnum_iov = num_iov;
1417 return NT_STATUS_OK;
1421 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1424 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1425 TALLOC_CTX *tmp_mem,
1428 struct tevent_req *req;
1429 struct smbXcli_req_state *state;
1436 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1437 struct iovec *iov = NULL;
1439 struct tevent_req **chain = NULL;
1440 size_t num_chained = 0;
1441 size_t num_responses = 0;
1443 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1444 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1445 DEBUG(10, ("Got non-SMB PDU\n"));
1446 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1450 * If we supported multiple encrytion contexts
1451 * here we'd look up based on tid.
1453 if (common_encryption_on(conn->smb1.trans_enc)
1454 && (CVAL(inbuf, 0) == 0)) {
1455 uint16_t enc_ctx_num;
1457 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1458 if (!NT_STATUS_IS_OK(status)) {
1459 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1460 nt_errstr(status)));
1464 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1465 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1467 conn->smb1.trans_enc->enc_ctx_num));
1468 return NT_STATUS_INVALID_HANDLE;
1471 status = common_decrypt_buffer(conn->smb1.trans_enc,
1473 if (!NT_STATUS_IS_OK(status)) {
1474 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1475 nt_errstr(status)));
1480 mid = SVAL(inhdr, HDR_MID);
1481 num_pending = talloc_array_length(conn->pending);
1483 for (i=0; i<num_pending; i++) {
1484 if (mid == smb1cli_req_mid(conn->pending[i])) {
1488 if (i == num_pending) {
1489 /* Dump unexpected reply */
1490 return NT_STATUS_RETRY;
1493 oplock_break = false;
1495 if (mid == 0xffff) {
1497 * Paranoia checks that this is really an oplock break request.
1499 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1500 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1501 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1502 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1503 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1505 if (!oplock_break) {
1506 /* Dump unexpected reply */
1507 return NT_STATUS_RETRY;
1511 req = conn->pending[i];
1512 state = tevent_req_data(req, struct smbXcli_req_state);
1514 if (!oplock_break /* oplock breaks are not signed */
1515 && !smb_signing_check_pdu(conn->smb1.signing,
1516 inbuf, state->smb1.seqnum+1)) {
1517 DEBUG(10, ("cli_check_sign_mac failed\n"));
1518 return NT_STATUS_ACCESS_DENIED;
1521 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1523 if (!NT_STATUS_IS_OK(status)) {
1524 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1525 nt_errstr(status)));
1529 cmd = CVAL(inhdr, HDR_COM);
1530 status = smb1cli_pull_raw_error(inhdr);
1532 if (state->smb1.chained_requests == NULL) {
1534 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1537 smbXcli_req_unset_pending(req);
1539 state->smb1.recv_cmd = cmd;
1540 state->smb1.recv_status = status;
1541 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1543 state->smb1.recv_iov[0] = iov[0];
1544 state->smb1.recv_iov[1] = iov[1];
1545 state->smb1.recv_iov[2] = iov[2];
1547 if (talloc_array_length(conn->pending) == 0) {
1548 tevent_req_done(req);
1549 return NT_STATUS_OK;
1552 tevent_req_defer_callback(req, state->ev);
1553 tevent_req_done(req);
1554 return NT_STATUS_RETRY;
1557 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1558 num_chained = talloc_array_length(chain);
1559 num_responses = (num_iov - 1)/2;
1561 if (num_responses > num_chained) {
1562 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1565 for (i=0; i<num_chained; i++) {
1566 size_t iov_idx = 1 + (i*2);
1567 struct iovec *cur = &iov[iov_idx];
1571 state = tevent_req_data(req, struct smbXcli_req_state);
1573 smbXcli_req_unset_pending(req);
1576 * as we finish multiple requests here
1577 * we need to defer the callbacks as
1578 * they could destroy our current stack state.
1580 tevent_req_defer_callback(req, state->ev);
1582 if (i >= num_responses) {
1583 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1587 state->smb1.recv_cmd = cmd;
1589 if (i == (num_responses - 1)) {
1591 * The last request in the chain gets the status
1593 state->smb1.recv_status = status;
1595 cmd = CVAL(cur[0].iov_base, 0);
1596 state->smb1.recv_status = NT_STATUS_OK;
1599 state->inbuf = inbuf;
1602 * Note: here we use talloc_reference() in a way
1603 * that does not expose it to the caller.
1605 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1606 if (tevent_req_nomem(inbuf_ref, req)) {
1610 /* copy the related buffers */
1611 state->smb1.recv_iov[0] = iov[0];
1612 state->smb1.recv_iov[1] = cur[0];
1613 state->smb1.recv_iov[2] = cur[1];
1615 tevent_req_done(req);
1618 return NT_STATUS_RETRY;
/*
 * smb1cli_req_recv: deliver the parsed SMB1 response for @req.
 * Hands the caller the header/vwv/bytes iovecs collected in
 * state->smb1.recv_iov and validates the received status/wct pair
 * against the @expected list.
 *
 * NOTE(review): this chunk is a numbered extraction with original
 * lines missing; code below is kept byte-identical to what is visible.
 */
1621 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1622 TALLOC_CTX *mem_ctx,
1623 struct iovec **piov,
1627 uint32_t *pvwv_offset,
1628 uint32_t *pnum_bytes,
1630 uint32_t *pbytes_offset,
1632 const struct smb1cli_req_expected_response *expected,
1633 size_t num_expected)
1635 struct smbXcli_req_state *state =
1636 tevent_req_data(req,
1637 struct smbXcli_req_state);
1638 NTSTATUS status = NT_STATUS_OK;
1639 struct iovec *recv_iov = NULL;
1640 uint8_t *hdr = NULL;
1642 uint32_t vwv_offset = 0;
1643 uint16_t *vwv = NULL;
1644 uint32_t num_bytes = 0;
1645 uint32_t bytes_offset = 0;
1646 uint8_t *bytes = NULL;
1648 bool found_status = false;
1649 bool found_size = false;
/* Zero the optional out-parameters first (bodies lost in extraction). */
1663 if (pvwv_offset != NULL) {
1666 if (pnum_bytes != NULL) {
1669 if (pbytes != NULL) {
1672 if (pbytes_offset != NULL) {
1675 if (pinbuf != NULL) {
/* A PDU was received: decode hdr/vwv/bytes views out of recv_iov. */
1679 if (state->inbuf != NULL) {
1680 recv_iov = state->smb1.recv_iov;
1681 hdr = (uint8_t *)recv_iov[0].iov_base;
1682 wct = recv_iov[1].iov_len/2;
1683 vwv = (uint16_t *)recv_iov[1].iov_base;
1684 vwv_offset = PTR_DIFF(vwv, hdr);
1685 num_bytes = recv_iov[2].iov_len;
1686 bytes = (uint8_t *)recv_iov[2].iov_base;
1687 bytes_offset = PTR_DIFF(bytes, hdr);
/* Transport-level error: accept it only if the caller listed it. */
1690 if (tevent_req_is_nterror(req, &status)) {
1691 for (i=0; i < num_expected; i++) {
1692 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1693 found_status = true;
1699 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* An empty expected list means "accept any status". */
1705 if (num_expected == 0) {
1706 found_status = true;
1710 status = state->smb1.recv_status;
/* Match received status and word count against the expectations;
 * expected[i].wct == 0 appears to act as a wildcard for the size. */
1712 for (i=0; i < num_expected; i++) {
1713 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1717 found_status = true;
1718 if (expected[i].wct == 0) {
1723 if (expected[i].wct == wct) {
1729 if (!found_status) {
1734 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Success: move ownership of the iov to the caller and fill the
 * requested out-parameters. */
1738 *piov = talloc_move(mem_ctx, &recv_iov);
1750 if (pvwv_offset != NULL) {
1751 *pvwv_offset = vwv_offset;
1753 if (pnum_bytes != NULL) {
1754 *pnum_bytes = num_bytes;
1756 if (pbytes != NULL) {
1759 if (pbytes_offset != NULL) {
1760 *pbytes_offset = bytes_offset;
1762 if (pinbuf != NULL) {
1763 *pinbuf = state->inbuf;
/*
 * smb1cli_req_wct_ofs: compute the offset of the wct field of the last
 * request when the given requests are concatenated into one AndX chain.
 * Each step adds the words+bytes length of a request and rounds up to a
 * 4-byte boundary. (Initialization/return lines lost in extraction.)
 */
1769 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1776 for (i=0; i<num_reqs; i++) {
1777 struct smbXcli_req_state *state;
1778 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1779 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1780 state->smb1.iov_count-2);
1781 wct_ofs = (wct_ofs + 3) & ~3;
/*
 * smb1cli_req_chain_submit: glue @num_reqs requests together into a
 * single SMB1 AndX chain and submit them as one PDU.
 * The first request carries the NBT and SMB headers; each non-final
 * request must be an AndX command and gets its andx-offset patched to
 * point at the following request.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical to what is visible.
 */
1786 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1788 struct smbXcli_req_state *first_state =
1789 tevent_req_data(reqs[0],
1790 struct smbXcli_req_state);
1791 struct smbXcli_req_state *state;
1793 size_t chain_padding = 0;
1795 struct iovec *iov = NULL;
1796 struct iovec *this_iov;
/* A chain of one is just a plain submit. */
1800 if (num_reqs == 1) {
1801 return smb1cli_req_writev_submit(reqs[0], first_state,
1802 first_state->smb1.iov,
1803 first_state->smb1.iov_count);
/* Sanity-check every request and size the merged iovec array. */
1807 for (i=0; i<num_reqs; i++) {
1808 if (!tevent_req_is_in_progress(reqs[i])) {
1809 return NT_STATUS_INTERNAL_ERROR;
1812 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1814 if (state->smb1.iov_count < 4) {
1815 return NT_STATUS_INVALID_PARAMETER_MIX;
1820 * The NBT and SMB header
1833 iovlen += state->smb1.iov_count - 2;
1836 iov = talloc_zero_array(first_state, struct iovec, iovlen);
1838 return NT_STATUS_NO_MEMORY;
/* Remember the chain so the response dispatcher can complete all
 * member requests from the single compound response. */
1841 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1842 first_state, reqs, sizeof(*reqs) * num_reqs);
1843 if (first_state->smb1.chained_requests == NULL) {
1845 return NT_STATUS_NO_MEMORY;
1848 wct_offset = HDR_WCT;
1851 for (i=0; i<num_reqs; i++) {
1852 size_t next_padding = 0;
1855 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* Every request except the last must be an AndX command with at
 * least 2 vwv words (andx cmd + andx offset). */
1857 if (i < num_reqs-1) {
1858 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
1859 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
1861 TALLOC_FREE(first_state->smb1.chained_requests);
1862 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Advance the running wct offset, keeping 4-byte alignment. */
1866 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
1867 state->smb1.iov_count-2) + 1;
1868 if ((wct_offset % 4) != 0) {
1869 next_padding = 4 - (wct_offset % 4);
1871 wct_offset += next_padding;
1872 vwv = state->smb1.vwv;
/* Patch the AndX linkage: next command + its wct offset, or
 * 0xff/0xff to terminate the chain. */
1874 if (i < num_reqs-1) {
1875 struct smbXcli_req_state *next_state =
1876 tevent_req_data(reqs[i+1],
1877 struct smbXcli_req_state);
1878 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
1880 SSVAL(vwv+1, 0, wct_offset);
1881 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
1882 /* properly end the chain */
1883 SCVAL(vwv+0, 0, 0xff);
1884 SCVAL(vwv+0, 1, 0xff);
1890 * The NBT and SMB header
1892 this_iov[0] = state->smb1.iov[0];
1893 this_iov[1] = state->smb1.iov[1];
1897 * This one is a bit subtle. We have to add
1898 * chain_padding bytes between the requests, and we
1899 * have to also include the wct field of the
1900 * subsequent requests. We use the subsequent header
1901 * for the padding, it contains the wct field in its
1904 this_iov[0].iov_len = chain_padding+1;
1905 this_iov[0].iov_base = (void *)&state->smb1.hdr[
1906 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
1907 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
1912 * copy the words and bytes
1914 memcpy(this_iov, state->smb1.iov+2,
1915 sizeof(struct iovec) * (state->smb1.iov_count-2));
1916 this_iov += state->smb1.iov_count - 2;
1917 chain_padding = next_padding;
/* The whole chain must still fit into the negotiated max_xmit. */
1920 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
1921 if (nbt_len > first_state->conn->smb1.max_xmit) {
1923 TALLOC_FREE(first_state->smb1.chained_requests);
1924 return NT_STATUS_INVALID_PARAMETER_MIX;
1927 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
1928 if (!NT_STATUS_IS_OK(status)) {
1930 TALLOC_FREE(first_state->smb1.chained_requests);
1934 return NT_STATUS_OK;
1937 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1939 return ((tevent_queue_length(conn->outgoing) != 0)
1940 || (talloc_array_length(conn->pending) != 0));
1943 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
1945 return conn->smb2.server.capabilities;
1948 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
1950 return conn->smb2.server.security_mode;
1953 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
1955 return conn->smb2.server.max_trans_size;
1958 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
1960 return conn->smb2.server.max_read_size;
1963 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
1965 return conn->smb2.server.max_write_size;
1968 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
1969 uint16_t max_credits)
1971 conn->smb2.max_credits = max_credits;
/*
 * smb2cli_req_create: allocate a tevent request and pre-fill its SMB2
 * header (magic, opcode, flags, pid/tid/uid) plus the fixed/dynamic
 * body pointers. The request is NOT submitted here; see
 * smb2cli_req_compound_submit().
 *
 * NOTE(review): numbered extraction with lines missing (some
 * parameters and error paths are not visible); code kept byte-identical.
 */
1974 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
1975 struct tevent_context *ev,
1976 struct smbXcli_conn *conn,
1978 uint32_t additional_flags,
1979 uint32_t clear_flags,
1980 uint32_t timeout_msec,
1984 const uint8_t *fixed,
1989 struct tevent_req *req;
1990 struct smbXcli_req_state *state;
1993 req = tevent_req_create(mem_ctx, &state,
1994 struct smbXcli_req_state);
/* Three iovecs: SMB2 header, fixed body, dynamic body. */
2002 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
2003 if (state->smb2.recv_iov == NULL) {
2008 flags |= additional_flags;
2009 flags &= ~clear_flags;
2011 state->smb2.fixed = fixed;
2012 state->smb2.fixed_len = fixed_len;
2013 state->smb2.dyn = dyn;
2014 state->smb2.dyn_len = dyn_len;
2016 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2017 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2018 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
2019 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2020 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2021 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2022 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
/* CANCEL requests never get a response of their own. */
2025 case SMB2_OP_CANCEL:
2026 state->one_way = true;
2030 * If this is a dummy request, it will have
2031 * UINT64_MAX as message id.
2032 * If we send on break acknowledgement,
2033 * this gets overwritten later.
2035 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
2039 if (timeout_msec > 0) {
2040 struct timeval endtime;
2042 endtime = timeval_current_ofs_msec(timeout_msec);
2043 if (!tevent_req_set_endtime(req, ev, endtime)) {
2051 static void smb2cli_writev_done(struct tevent_req *subreq);
2052 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2053 TALLOC_CTX *tmp_mem,
/*
 * smb2cli_req_compound_submit: assign message ids and credits to each
 * request, build one NBT-framed buffer containing all SMB2 PDUs
 * (8-byte aligned between requests) and write it to the socket.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical to what is visible.
 */
2056 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2059 struct smbXcli_req_state *state;
2060 struct tevent_req *subreq;
2062 int i, num_iov, nbt_len;
2065 * 1 for the nbt length
2066 * per request: HDR, fixed, dyn, padding
2067 * -1 because the last one does not need padding
2070 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2072 return NT_STATUS_NO_MEMORY;
2078 for (i=0; i<num_reqs; i++) {
2086 if (!tevent_req_is_in_progress(reqs[i])) {
2087 return NT_STATUS_INTERNAL_ERROR;
2090 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2092 if (!smbXcli_conn_is_connected(state->conn)) {
2093 return NT_STATUS_CONNECTION_DISCONNECTED;
/* Submitting SMB2 on a pre-SMB2 connection is a caller bug. */
2096 if ((state->conn->protocol != PROTOCOL_NONE) &&
2097 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2098 return NT_STATUS_REVISION_MISMATCH;
/* Guard against message-id exhaustion. */
2101 avail = UINT64_MAX - state->conn->smb2.mid;
2103 return NT_STATUS_CONNECTION_ABORTED;
/* With LARGE_MTU the charge scales with the dynamic length
 * (1 credit per started 64KiB). */
2106 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2107 charge = (MAX(state->smb2.dyn_len, 1) - 1)/ 65536 + 1;
2112 charge = MAX(state->smb2.credit_charge, charge);
2114 avail = MIN(avail, state->conn->smb2.cur_credits);
2115 if (avail < charge) {
2116 return NT_STATUS_INTERNAL_ERROR;
/* Ask the server for enough credits to reach max_credits. */
2120 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
2121 credits = state->conn->smb2.max_credits -
2122 state->conn->smb2.cur_credits;
2124 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
/* Consume the charge: the mid range [mid, mid+charge) is used. */
2128 mid = state->conn->smb2.mid;
2129 state->conn->smb2.mid += charge;
2130 state->conn->smb2.cur_credits -= charge;
2132 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2133 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
2135 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
2136 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
2138 iov[num_iov].iov_base = state->smb2.hdr;
2139 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2142 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2143 iov[num_iov].iov_len = state->smb2.fixed_len;
2146 if (state->smb2.dyn != NULL) {
2147 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2148 iov[num_iov].iov_len = state->smb2.dyn_len;
2152 reqlen = sizeof(state->smb2.hdr);
2153 reqlen += state->smb2.fixed_len;
2154 reqlen += state->smb2.dyn_len;
/* Pad every PDU except the last to an 8-byte boundary and link
 * it via NextCommand. */
2156 if (i < num_reqs-1) {
2157 if ((reqlen % 8) > 0) {
2158 uint8_t pad = 8 - (reqlen % 8);
2159 iov[num_iov].iov_base = state->smb2.pad;
2160 iov[num_iov].iov_len = pad;
2164 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2168 ret = smbXcli_req_set_pending(reqs[i]);
2170 return NT_STATUS_NO_MEMORY;
2175 * TODO: Do signing here
2178 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2179 _smb_setlen_tcp(state->length_hdr, nbt_len);
2180 iov[0].iov_base = state->length_hdr;
2181 iov[0].iov_len = sizeof(state->length_hdr);
2183 if (state->conn->dispatch_incoming == NULL) {
2184 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2187 subreq = writev_send(state, state->ev, state->conn->outgoing,
2188 state->conn->fd, false, iov, num_iov);
2189 if (subreq == NULL) {
2190 return NT_STATUS_NO_MEMORY;
2192 tevent_req_set_callback(subreq, smb2cli_writev_done, reqs[0]);
2193 return NT_STATUS_OK;
2196 void smb2cli_req_set_credit_charge(struct tevent_req *req, uint16_t charge)
2198 struct smbXcli_req_state *state =
2199 tevent_req_data(req,
2200 struct smbXcli_req_state);
2202 state->smb2.credit_charge = charge;
/*
 * smb2cli_req_send: convenience wrapper that creates a single SMB2
 * request and immediately submits it as a compound of one.
 *
 * NOTE(review): numbered extraction with lines missing (some
 * parameters and the tail return are not visible); code kept
 * byte-identical.
 */
2205 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2206 struct tevent_context *ev,
2207 struct smbXcli_conn *conn,
2209 uint32_t additional_flags,
2210 uint32_t clear_flags,
2211 uint32_t timeout_msec,
2215 const uint8_t *fixed,
2220 struct tevent_req *req;
2223 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2224 additional_flags, clear_flags,
2227 fixed, fixed_len, dyn, dyn_len);
/* smb2cli_req_create() may already have failed the request
 * (e.g. timeout setup); post the result in that case. */
2231 if (!tevent_req_is_in_progress(req)) {
2232 return tevent_req_post(req, ev);
2234 status = smb2cli_req_compound_submit(&req, 1);
2235 if (tevent_req_nterror(req, status)) {
2236 return tevent_req_post(req, ev);
/*
 * smb2cli_writev_done: completion callback for the socket write of an
 * SMB2 compound. On write failure the whole connection is torn down,
 * which notifies every pending request.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical.
 */
2241 static void smb2cli_writev_done(struct tevent_req *subreq)
2243 struct tevent_req *req =
2244 tevent_req_callback_data(subreq,
2246 struct smbXcli_req_state *state =
2247 tevent_req_data(req,
2248 struct smbXcli_req_state);
2252 nwritten = writev_recv(subreq, &err);
2253 TALLOC_FREE(subreq);
2254 if (nwritten == -1) {
2255 /* here, we need to notify all pending requests */
2256 NTSTATUS status = map_nt_error_from_unix_common(err);
2257 smbXcli_conn_disconnect(state->conn, status);
/*
 * smb2cli_inbuf_parse_compound: split one NBT-framed receive buffer
 * into per-PDU iovec triples (header, body, rest), following the
 * NextCommand chain. Returns the triples in *piov and their count in
 * *pnum_iov.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical.
 */
2262 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2263 struct iovec **piov, int *pnum_iov)
2273 iov = talloc_array(mem_ctx, struct iovec, num_iov)
2275 return NT_STATUS_NO_MEMORY;
2278 buflen = smb_len_tcp(buf);
2280 first_hdr = buf + NBT_HDR_SIZE;
2282 while (taken < buflen) {
2283 size_t len = buflen - taken;
2284 uint8_t *hdr = first_hdr + taken;
2287 size_t next_command_ofs;
2289 struct iovec *iov_tmp;
2292 * We need the header plus the body length field
2295 if (len < SMB2_HDR_BODY + 2) {
2296 DEBUG(10, ("%d bytes left, expected at least %d\n",
2297 (int)len, SMB2_HDR_BODY));
2300 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2301 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2305 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2306 DEBUG(10, ("Got HDR len %d, expected %d\n",
2307 SVAL(hdr, 4), SMB2_HDR_BODY));
2312 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2313 body_size = SVAL(hdr, SMB2_HDR_BODY);
/* NextCommand, when set, bounds this PDU and must be sane. */
2315 if (next_command_ofs != 0) {
2316 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2319 if (next_command_ofs > full_size) {
2322 full_size = next_command_ofs;
/* body_size includes the odd trailing bit; mask it off. */
2324 if (body_size < 2) {
2327 body_size &= 0xfffe;
2329 if (body_size > (full_size - SMB2_HDR_BODY)) {
2333 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2335 if (iov_tmp == NULL) {
2337 return NT_STATUS_NO_MEMORY;
2340 cur = &iov[num_iov];
2343 cur[0].iov_base = hdr;
2344 cur[0].iov_len = SMB2_HDR_BODY;
2345 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2346 cur[1].iov_len = body_size;
2347 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2348 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2354 *pnum_iov = num_iov;
2355 return NT_STATUS_OK;
2359 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * smb2cli_conn_find_pending: look up the pending request whose header
 * message id matches @mid. (Return statements lost in extraction;
 * presumably returns the request or NULL.)
 */
2362 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2365 size_t num_pending = talloc_array_length(conn->pending);
2368 for (i=0; i<num_pending; i++) {
2369 struct tevent_req *req = conn->pending[i];
2370 struct smbXcli_req_state *state =
2371 tevent_req_data(req,
2372 struct smbXcli_req_state);
2374 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * smb2cli_conn_dispatch_incoming: parse a received SMB2 buffer into
 * PDUs and complete the matching pending requests. Handles credit
 * accounting, opcode validation and async STATUS_PENDING interim
 * responses.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical.
 */
2381 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2382 TALLOC_CTX *tmp_mem,
2385 struct tevent_req *req;
2386 struct smbXcli_req_state *state = NULL;
2392 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2394 if (!NT_STATUS_IS_OK(status)) {
/* One iteration per PDU: iov triples are (hdr, body, rest). */
2398 for (i=0; i<num_iov; i+=3) {
2399 uint8_t *inbuf_ref = NULL;
2400 struct iovec *cur = &iov[i];
2401 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2402 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2403 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2404 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2405 uint16_t req_opcode;
2406 uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT);
2407 uint32_t new_credits;
/* Credit grants must never overflow the 16-bit credit window. */
2409 new_credits = conn->smb2.cur_credits;
2410 new_credits += credits;
2411 if (new_credits > UINT16_MAX) {
2412 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2414 conn->smb2.cur_credits += credits;
2416 req = smb2cli_conn_find_pending(conn, mid);
2418 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2420 state = tevent_req_data(req, struct smbXcli_req_state);
/* The response opcode must match what we sent and must carry
 * the server-to-redirect flag. */
2422 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2423 if (opcode != req_opcode) {
2424 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2427 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2428 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Interim async response: remember the async id and keep the
 * request pending for the real answer. */
2431 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
2432 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2433 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2434 uint32_t req_flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2435 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2437 req_flags |= SMB2_HDR_FLAG_ASYNC;
2438 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2439 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2443 smbXcli_req_unset_pending(req);
2446 * There might be more than one response
2447 * we need to defer the notifications
2449 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
2454 tevent_req_defer_callback(req, state->ev);
2458 * Note: here we use talloc_reference() in a way
2459 * that does not expose it to the caller.
2461 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2462 if (tevent_req_nomem(inbuf_ref, req)) {
2466 /* copy the related buffers */
2467 state->smb2.recv_iov[0] = cur[0];
2468 state->smb2.recv_iov[1] = cur[1];
2469 state->smb2.recv_iov[2] = cur[2];
2471 tevent_req_done(req);
2475 return NT_STATUS_RETRY;
2478 return NT_STATUS_OK;
/*
 * smb2cli_req_recv: deliver the SMB2 response iov to the caller after
 * checking the received status/body-size pair against @expected
 * (empty list accepts any status; body_size 0 acts as a wildcard).
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical.
 */
2481 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2482 struct iovec **piov,
2483 const struct smb2cli_req_expected_response *expected,
2484 size_t num_expected)
2486 struct smbXcli_req_state *state =
2487 tevent_req_data(req,
2488 struct smbXcli_req_state);
2491 bool found_status = false;
2492 bool found_size = false;
/* Transport-level error: accept it only if the caller listed it. */
2499 if (tevent_req_is_nterror(req, &status)) {
2500 for (i=0; i < num_expected; i++) {
2501 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2502 found_status = true;
2508 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2514 if (num_expected == 0) {
2515 found_status = true;
2519 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2520 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2522 for (i=0; i < num_expected; i++) {
2523 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2527 found_status = true;
2528 if (expected[i].body_size == 0) {
2533 if (expected[i].body_size == body_size) {
2539 if (!found_status) {
2544 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Success: hand ownership of the response iov to the caller. */
2548 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
/*
 * Table of SMB1 negprot dialect strings, ordered from oldest to
 * newest; the index of the chosen entry maps back to a protocol level.
 */
2554 static const struct {
2555 enum protocol_types proto;
2556 const char *smb1_name;
2557 } smb1cli_prots[] = {
2558 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
2559 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
2560 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
2561 {PROTOCOL_LANMAN1, "LANMAN1.0"},
2562 {PROTOCOL_LANMAN2, "LM1.2X002"},
2563 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
2564 {PROTOCOL_LANMAN2, "LANMAN2.1"},
2565 {PROTOCOL_LANMAN2, "Samba"},
2566 {PROTOCOL_NT1, "NT LANMAN 1.0"},
2567 {PROTOCOL_NT1, "NT LM 0.12"},
2568 {PROTOCOL_SMB2_02, "SMB 2.002"},
2569 {PROTOCOL_SMB2_10, "SMB 2.???"},
/*
 * Table of SMB2 dialect revision codes sent in the SMB2 NEGOTIATE
 * request, ordered from oldest to newest.
 */
2572 static const struct {
2573 enum protocol_types proto;
2574 uint16_t smb2_dialect;
2575 } smb2cli_prots[] = {
2576 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
2577 {PROTOCOL_SMB2_10, SMB2_DIALECT_REVISION_210},
/*
 * Per-negotiation state: connection, event context, timeout and the
 * allowed protocol range, plus scratch buffers for the SMB2 negprot
 * body (dyn holds one 16-bit dialect code per table entry).
 */
2580 struct smbXcli_negprot_state {
2581 struct smbXcli_conn *conn;
2582 struct tevent_context *ev;
2583 uint32_t timeout_msec;
2584 enum protocol_types min_protocol;
2585 enum protocol_types max_protocol;
2589 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
2593 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
2594 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
2595 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
2596 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
2597 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
2598 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
/*
 * smbXcli_negprot_send: start protocol negotiation for the range
 * [min_protocol, max_protocol]. Pure-SMB1 ranges send an SMB1 negprot,
 * pure-SMB2 ranges send an SMB2 negprot, and mixed ranges send an SMB1
 * negprot advertising SMB2 dialects and accept either reply.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical.
 */
2602 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
2603 struct tevent_context *ev,
2604 struct smbXcli_conn *conn,
2605 uint32_t timeout_msec,
2606 enum protocol_types min_protocol,
2607 enum protocol_types max_protocol)
2609 struct tevent_req *req, *subreq;
2610 struct smbXcli_negprot_state *state;
2612 req = tevent_req_create(mem_ctx, &state,
2613 struct smbXcli_negprot_state);
2619 state->timeout_msec = timeout_msec;
2620 state->min_protocol = min_protocol;
2621 state->max_protocol = max_protocol;
/* Both range ends must be set and ordered. */
2623 if (min_protocol == PROTOCOL_NONE) {
2624 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2625 return tevent_req_post(req, ev);
2628 if (max_protocol == PROTOCOL_NONE) {
2629 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2630 return tevent_req_post(req, ev);
2633 if (min_protocol > max_protocol) {
2634 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2635 return tevent_req_post(req, ev);
/* SMB1-only range. */
2638 if ((min_protocol < PROTOCOL_SMB2_02) &&
2639 (max_protocol < PROTOCOL_SMB2_02)) {
2643 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
2645 subreq = smbXcli_negprot_smb1_subreq(state);
2646 if (tevent_req_nomem(subreq, req)) {
2647 return tevent_req_post(req, ev);
2649 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
/* SMB2-only range. */
2653 if ((min_protocol >= PROTOCOL_SMB2_02) &&
2654 (max_protocol >= PROTOCOL_SMB2_02)) {
2658 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2660 subreq = smbXcli_negprot_smb2_subreq(state);
2661 if (tevent_req_nomem(subreq, req)) {
2662 return tevent_req_post(req, ev);
2664 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
2669 * We send an SMB1 negprot with the SMB2 dialects
2670 * and expect a SMB1 or a SMB2 response.
2672 * smbXcli_negprot_dispatch_incoming() will fix the
2673 * callback to match protocol of the response.
2675 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
2677 subreq = smbXcli_negprot_smb1_subreq(state);
2678 if (tevent_req_nomem(subreq, req)) {
2679 return tevent_req_post(req, ev);
2681 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
/*
 * smbXcli_negprot_invalid_done: callback for the mixed SMB1/SMB2 case.
 * The dispatcher normally retargets the callback before completion, so
 * landing here means the subreq failed at the transport level (or an
 * internal error occurred).
 */
2685 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
2687 struct tevent_req *req =
2688 tevent_req_callback_data(subreq,
2693 * we just want the low level error
2695 status = tevent_req_simple_recv_ntstatus(subreq);
2696 TALLOC_FREE(subreq);
2697 if (tevent_req_nterror(req, status)) {
2701 /* this should never happen */
2702 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
/*
 * smbXcli_negprot_smb1_subreq: build and send the SMB1 negprot request
 * containing one dialect string per table entry within the allowed
 * protocol range.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical.
 */
2705 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
2708 DATA_BLOB bytes = data_blob_null;
2712 /* setup the protocol strings */
2713 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
2717 if (smb1cli_prots[i].proto < state->min_protocol) {
2721 if (smb1cli_prots[i].proto > state->max_protocol) {
/* Each dialect entry: a format byte followed by the string. */
2725 ok = data_blob_append(state, &bytes, &c, sizeof(c));
2731 * We now it is already ascii and
2732 * we want NULL termination.
2734 ok = data_blob_append(state, &bytes,
2735 smb1cli_prots[i].smb1_name,
2736 strlen(smb1cli_prots[i].smb1_name)+1);
2742 smb1cli_req_flags(state->max_protocol,
2743 state->conn->smb1.client.capabilities,
2748 return smb1cli_req_send(state, state->ev, state->conn,
2752 state->timeout_msec,
2753 0xFFFE, 0, 0, /* pid, tid, uid */
2754 0, NULL, /* wct, vwv */
2755 bytes.length, bytes.data);
/*
 * smbXcli_negprot_smb1_done: parse the SMB1 negprot response, select
 * the negotiated protocol level and decode the server parameters
 * (NT1, LANMAN or CORE response formats), then store everything in
 * conn->smb1.*.
 *
 * NOTE(review): numbered extraction with many lines missing; code kept
 * byte-identical to what is visible.
 */
2758 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
2760 struct tevent_req *req =
2761 tevent_req_callback_data(subreq,
2763 struct smbXcli_negprot_state *state =
2764 tevent_req_data(req,
2765 struct smbXcli_negprot_state);
2766 struct smbXcli_conn *conn = state->conn;
2767 struct iovec *recv_iov = NULL;
2776 size_t num_prots = 0;
2778 uint32_t client_capabilities = conn->smb1.client.capabilities;
2779 uint32_t both_capabilities;
2780 uint32_t server_capabilities = 0;
2781 uint32_t capabilities;
2782 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
2783 uint32_t server_max_xmit = 0;
2785 uint32_t server_max_mux = 0;
2786 uint16_t server_security_mode = 0;
2787 uint32_t server_session_key = 0;
2788 bool server_readbraw = false;
2789 bool server_writebraw = false;
2790 bool server_lockread = false;
2791 bool server_writeunlock = false;
2792 struct GUID server_guid = GUID_zero();
2793 DATA_BLOB server_gss_blob = data_blob_null;
2794 uint8_t server_challenge[8];
2795 char *server_workgroup = NULL;
2796 char *server_name = NULL;
2797 int server_time_zone = 0;
2798 NTTIME server_system_time = 0;
/* Accept NT1 (wct 0x11), LANMAN (0x0D) or CORE (0x01) replies. */
2799 static const struct smb1cli_req_expected_response expected[] = {
2801 .status = NT_STATUS_OK,
2802 .wct = 0x11, /* NT1 */
2805 .status = NT_STATUS_OK,
2806 .wct = 0x0D, /* LM */
2809 .status = NT_STATUS_OK,
2810 .wct = 0x01, /* CORE */
2814 ZERO_STRUCT(server_challenge);
2816 status = smb1cli_req_recv(subreq, state,
2821 NULL, /* pvwv_offset */
2824 NULL, /* pbytes_offset */
2826 expected, ARRAY_SIZE(expected));
2827 TALLOC_FREE(subreq);
2828 if (tevent_req_nterror(req, status)) {
2832 flags = CVAL(inhdr, HDR_FLG);
/* protnum is the index of the dialect the server picked; map it
 * back onto the subset of smb1cli_prots we actually offered. */
2834 protnum = SVAL(vwv, 0);
2836 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
2837 if (smb1cli_prots[i].proto < state->min_protocol) {
2841 if (smb1cli_prots[i].proto > state->max_protocol) {
2845 if (protnum != num_prots) {
2850 conn->protocol = smb1cli_prots[i].proto;
2854 if (conn->protocol == PROTOCOL_NONE) {
2855 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* Pre-NT1 dialects cannot do SMB signing. */
2859 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
2860 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
2861 "and the selected protocol level doesn't support it.\n"));
2862 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
2866 if (flags & FLAG_SUPPORT_LOCKREAD) {
2867 server_lockread = true;
2868 server_writeunlock = true;
/* --- NT1 response format --- */
2871 if (conn->protocol >= PROTOCOL_NT1) {
2872 const char *client_signing = NULL;
2873 bool server_mandatory = false;
2874 bool server_allowed = false;
2875 const char *server_signing = NULL;
2880 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2885 server_security_mode = CVAL(vwv + 1, 0);
2886 server_max_mux = SVAL(vwv + 1, 1);
2887 server_max_xmit = IVAL(vwv + 3, 1);
2888 server_session_key = IVAL(vwv + 7, 1);
2889 server_time_zone = SVALS(vwv + 15, 1);
2890 server_time_zone *= 60;
2891 /* this time arrives in real GMT */
2892 server_system_time = BVAL(vwv + 11, 1);
2893 server_capabilities = IVAL(vwv + 9, 1);
2895 key_len = CVAL(vwv + 16, 1);
2897 if (server_capabilities & CAP_RAW_MODE) {
2898 server_readbraw = true;
2899 server_writebraw = true;
2901 if (server_capabilities & CAP_LOCK_AND_READ) {
2902 server_lockread = true;
/* Extended security: bytes = 16-byte server GUID + GSS blob. */
2905 if (server_capabilities & CAP_EXTENDED_SECURITY) {
2906 DATA_BLOB blob1, blob2;
2908 if (num_bytes < 16) {
2909 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2913 blob1 = data_blob_const(bytes, 16);
2914 status = GUID_from_data_blob(&blob1, &server_guid);
2915 if (tevent_req_nterror(req, status)) {
2919 blob1 = data_blob_const(bytes+16, num_bytes-16);
2920 blob2 = data_blob_dup_talloc(state, blob1);
2921 if (blob1.length > 0 &&
2922 tevent_req_nomem(blob2.data, req)) {
2925 server_gss_blob = blob2;
/* Non-extended security: 8-byte challenge + workgroup/name. */
2927 DATA_BLOB blob1, blob2;
2929 if (num_bytes < key_len) {
2930 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2934 if (key_len != 0 && key_len != 8) {
2935 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2940 memcpy(server_challenge, bytes, 8);
2943 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
2944 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
2945 if (blob1.length > 0) {
2948 len = utf16_len_n(blob1.data,
2952 ok = convert_string_talloc(state,
2960 status = map_nt_error_from_unix_common(errno);
2961 tevent_req_nterror(req, status);
2966 blob2.data += blob1.length;
2967 blob2.length -= blob1.length;
2968 if (blob2.length > 0) {
2971 len = utf16_len_n(blob1.data,
2975 ok = convert_string_talloc(state,
2983 status = map_nt_error_from_unix_common(errno);
2984 tevent_req_nterror(req, status);
/* Reconcile client and server signing policies. */
2990 client_signing = "disabled";
2991 if (conn->allow_signing) {
2992 client_signing = "allowed";
2994 if (conn->mandatory_signing) {
2995 client_signing = "required";
2998 server_signing = "not supported";
2999 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
3000 server_signing = "supported";
3001 server_allowed = true;
3003 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
3004 server_signing = "required";
3005 server_mandatory = true;
3008 ok = smb_signing_set_negotiated(conn->smb1.signing,
3012 DEBUG(1,("cli_negprot: SMB signing is required, "
3013 "but client[%s] and server[%s] mismatch\n",
3014 client_signing, server_signing));
3015 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
/* --- LANMAN response format --- */
3019 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
3025 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3029 server_security_mode = SVAL(vwv + 1, 0);
3030 server_max_xmit = SVAL(vwv + 2, 0);
3031 server_max_mux = SVAL(vwv + 3, 0);
3032 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
3033 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
3034 server_session_key = IVAL(vwv + 6, 0);
3035 server_time_zone = SVALS(vwv + 10, 0);
3036 server_time_zone *= 60;
3037 /* this time is converted to GMT by make_unix_date */
3038 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
3039 unix_to_nt_time(&server_system_time, t);
3040 key_len = SVAL(vwv + 11, 0);
3042 if (num_bytes < key_len) {
3043 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3047 if (key_len != 0 && key_len != 8) {
3048 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3053 memcpy(server_challenge, bytes, 8);
3056 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3057 if (blob1.length > 0) {
3061 len = utf16_len_n(blob1.data,
3065 ok = convert_string_talloc(state,
3073 status = map_nt_error_from_unix_common(errno);
3074 tevent_req_nterror(req, status);
3080 /* the old core protocol */
3081 server_time_zone = get_time_zone(time(NULL));
3082 server_max_xmit = 1024;
/* Sanity-check the negotiated limits. */
3086 if (server_max_xmit < 1024) {
3087 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3091 if (server_max_mux < 1) {
3092 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3097 * Now calculate the negotiated capabilities
3098 * based on the mask for:
3099 * - client only flags
3100 * - flags used in both directions
3101 * - server only flags
3103 both_capabilities = client_capabilities & server_capabilities;
3104 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3105 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3106 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3108 max_xmit = MIN(client_max_xmit, server_max_xmit);
/* Persist the negotiated parameters on the connection. */
3110 conn->smb1.server.capabilities = server_capabilities;
3111 conn->smb1.capabilities = capabilities;
3113 conn->smb1.server.max_xmit = server_max_xmit;
3114 conn->smb1.max_xmit = max_xmit;
3116 conn->smb1.server.max_mux = server_max_mux;
3118 conn->smb1.server.security_mode = server_security_mode;
3120 conn->smb1.server.readbraw = server_readbraw;
3121 conn->smb1.server.writebraw = server_writebraw;
3122 conn->smb1.server.lockread = server_lockread;
3123 conn->smb1.server.writeunlock = server_writeunlock;
3125 conn->smb1.server.session_key = server_session_key;
3127 talloc_steal(conn, server_gss_blob.data);
3128 conn->smb1.server.gss_blob = server_gss_blob;
3129 conn->smb1.server.guid = server_guid;
3130 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3131 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3132 conn->smb1.server.name = talloc_move(conn, &server_name);
3134 conn->smb1.server.time_zone = server_time_zone;
3135 conn->smb1.server.system_time = server_system_time;
3137 tevent_req_done(req);
/*
 * smbXcli_negprot_smb2_subreq: build the SMB2 NEGOTIATE request body
 * (dialect list, security mode, client GUID for 2.10+) and send it.
 *
 * NOTE(review): numbered extraction with lines missing; code kept
 * byte-identical.
 */
3140 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3144 uint16_t dialect_count = 0;
/* Fill the dynamic part with one dialect code per allowed entry. */
3146 buf = state->smb2.dyn;
3147 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3148 if (smb2cli_prots[i].proto < state->min_protocol) {
3152 if (smb2cli_prots[i].proto > state->max_protocol) {
3156 SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
/* Fixed part: DialectCount, SecurityMode, Capabilities, ClientGuid. */
3160 buf = state->smb2.fixed;
3162 SSVAL(buf, 2, dialect_count);
3163 SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3164 SSVAL(buf, 6, 0); /* Reserved */
3165 SSVAL(buf, 8, 0); /* Capabilities */
3166 if (state->max_protocol >= PROTOCOL_SMB2_10) {
3170 status = GUID_to_ndr_blob(&state->conn->smb2.client.guid,
3172 if (!NT_STATUS_IS_OK(status)) {
3175 memcpy(buf+12, blob.data, 16); /* ClientGuid */
3177 memset(buf+12, 0, 16); /* ClientGuid */
3179 SBVAL(buf, 28, 0); /* ClientStartTime */
3181 return smb2cli_req_send(state, state->ev,
3182 state->conn, SMB2_OP_NEGPROT,
3184 state->timeout_msec,
3185 0xFEFF, 0, 0, /* pid, tid, uid */
3186 state->smb2.fixed, sizeof(state->smb2.fixed),
3187 state->smb2.dyn, dialect_count*2);
/*
 * Completion callback for the SMB2 NEGOTIATE subrequest.
 *
 * Parses the NEGOTIATE response body: maps the server's DialectRevision
 * back to an internal protocol level, handles the SMB 2.??? (0x02FF)
 * wildcard answer by re-sending an SMB2-only negprot, then records the
 * server's security mode, GUID, capabilities, size limits, timestamps
 * and the trailing security (GSS) buffer on the connection.
 *
 * Completes 'req' with NT_STATUS_INVALID_NETWORK_RESPONSE on any
 * malformed or unexpected response.
 */
3190 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3192 	struct tevent_req *req =
3193 		tevent_req_callback_data(subreq,
3195 	struct smbXcli_negprot_state *state =
3196 		tevent_req_data(req,
3197 		struct smbXcli_negprot_state);
3198 	struct smbXcli_conn *conn = state->conn;
3199 	size_t security_offset, security_length;
3205 	uint16_t dialect_revision;
3206 	static const struct smb2cli_req_expected_response expected[] = {
3208 	.status = NT_STATUS_OK,
3213 	status = smb2cli_req_recv(subreq, state, &iov,
3214 				  expected, ARRAY_SIZE(expected));
3215 	TALLOC_FREE(subreq);
3216 	if (tevent_req_nterror(req, status)) {
/* iov[1] is the fixed response body; offset 4 holds DialectRevision. */
3220 	body = (uint8_t *)iov[1].iov_base;
3222 	dialect_revision = SVAL(body, 4);
/* Translate the on-wire dialect code into our protocol enum. */
3224 	for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3225 		if (smb2cli_prots[i].proto < state->min_protocol) {
3229 		if (smb2cli_prots[i].proto > state->max_protocol) {
3233 		if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3237 		conn->protocol = smb2cli_prots[i].proto;
3241 	if (conn->protocol == PROTOCOL_NONE) {
/*
 * If the caller demanded SMB2+, an unrecognized dialect is a
 * hard failure rather than a downgrade opportunity.
 */
3242 		if (state->min_protocol >= PROTOCOL_SMB2_02) {
3243 			tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/*
 * Dialect 0x02FF ("SMB 2.???") means the server answered a
 * multi-protocol SMB1 negprot with a wildcard; anything else
 * unmatched is a protocol violation.
 */
3247 		if (dialect_revision != SMB2_DIALECT_REVISION_2FF) {
3248 			tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3252 		/* make sure we do not loop forever */
3253 		state->min_protocol = PROTOCOL_SMB2_02;
3256 		 * send a SMB2 negprot, in order to negotiate
3257 		 * the SMB2 dialect. This needs to use the
/* Re-negotiation starts the SMB2 message-id sequence at 1. */
3260 		state->conn->smb2.mid = 1;
3261 		subreq = smbXcli_negprot_smb2_subreq(state);
3262 		if (tevent_req_nomem(subreq, req)) {
3265 		tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
/* A real dialect was negotiated: record the server's properties. */
3269 	conn->smb2.server.security_mode = SVAL(body, 2);
/* ServerGuid: 16 raw bytes at offset 8 of the response body. */
3271 	blob = data_blob_const(body + 8, 16);
3272 	status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3273 	if (tevent_req_nterror(req, status)) {
3277 	conn->smb2.server.capabilities	= IVAL(body, 24);
3278 	conn->smb2.server.max_trans_size= IVAL(body, 28);
3279 	conn->smb2.server.max_read_size	= IVAL(body, 32);
3280 	conn->smb2.server.max_write_size= IVAL(body, 36);
3281 	conn->smb2.server.system_time	= BVAL(body, 40);
3282 	conn->smb2.server.start_time	= BVAL(body, 48);
3284 	security_offset = SVAL(body, 56);
3285 	security_length = SVAL(body, 58);
/*
 * The security buffer must start directly after the fixed body
 * (SMB2 header + body length) and fit inside the received dynamic
 * part (iov[2]) — reject anything else as malformed.
 */
3287 	if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3288 		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3292 	if (security_length > iov[2].iov_len) {
3293 		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* Copy the GSS/SPNEGO token so it outlives the receive buffer. */
3297 	conn->smb2.server.gss_blob = data_blob_talloc(conn,
3300 	if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3304 	tevent_req_done(req);
/*
 * Dispatch the very first PDU received on a connection.
 *
 * During negprot we do not yet know whether the server speaks SMB1 or
 * SMB2, so this temporary dispatcher inspects the protocol magic of the
 * incoming buffer, installs the matching permanent dispatch function on
 * the connection, points the pending subrequest's callback at the right
 * negprot completion handler, and then forwards the PDU to the newly
 * installed dispatcher.
 *
 * Exactly one request may be pending at this stage; anything else is an
 * internal error. A non-SMB magic is rejected as an invalid response.
 */
3307 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3308 						  TALLOC_CTX *tmp_mem,
3311 	size_t num_pending = talloc_array_length(conn->pending);
3312 	struct tevent_req *subreq;
3313 	struct smbXcli_req_state *substate;
3314 	struct tevent_req *req;
3315 	struct smbXcli_negprot_state *state;
/* Protocol magic lives at offset 4, after the 4-byte NBT length. */
3316 	uint32_t protocol_magic = IVAL(inbuf, 4);
3318 	if (num_pending != 1) {
3319 		return NT_STATUS_INTERNAL_ERROR;
3322 	subreq = conn->pending[0];
3323 	substate = tevent_req_data(subreq, struct smbXcli_req_state);
3324 	req = tevent_req_callback_data(subreq, struct tevent_req);
3325 	state = tevent_req_data(req, struct smbXcli_negprot_state);
3327 	switch (protocol_magic) {
/* SMB1 answer: switch to the SMB1 dispatcher permanently. */
3329 		tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3330 		conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3331 		return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/*
 * SMB2 answer: the request was sent as an SMB1 negprot, so the
 * receive iovec may still sit in the smb1 slot — move it over to
 * the smb2 slot before the SMB2 dispatcher looks at it.
 */
3334 		if (substate->smb2.recv_iov == NULL) {
3336 			 * For the SMB1 negprot we have move it.
3338 			substate->smb2.recv_iov = substate->smb1.recv_iov;
3339 			substate->smb1.recv_iov = NULL;
3342 		tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3343 		conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3344 		return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/* Neither SMB1 nor SMB2 magic: not an SMB server. */
3347 	DEBUG(10, ("Got non-SMB PDU\n"));
3348 	return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Collect the result of smbXcli_negprot_send().
 *
 * Thin wrapper: the negprot request carries no output beyond its
 * NTSTATUS, so simply map the tevent_req state to an NTSTATUS.
 */
3351 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
3353 	return tevent_req_simple_recv_ntstatus(req);
3356 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
3357 uint32_t timeout_msec,
3358 enum protocol_types min_protocol,
3359 enum protocol_types max_protocol)
3361 TALLOC_CTX *frame = talloc_stackframe();
3362 struct tevent_context *ev;
3363 struct tevent_req *req;
3364 NTSTATUS status = NT_STATUS_NO_MEMORY;
3367 if (smbXcli_conn_has_async_calls(conn)) {
3369 * Can't use sync call while an async call is in flight
3371 status = NT_STATUS_INVALID_PARAMETER_MIX;
3374 ev = tevent_context_init(frame);
3378 req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
3379 min_protocol, max_protocol);
3383 ok = tevent_req_poll(req, ev);
3385 status = map_nt_error_from_unix_common(errno);
3388 status = smbXcli_negprot_recv(req);