2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "../libcli/smb/smb_common.h"
28 #include "../libcli/smb/smb_seal.h"
29 #include "../libcli/smb/smb_signing.h"
30 #include "../libcli/smb/read_smb.h"
31 #include "smbXcli_base.h"
32 #include "librpc/ndr/libndr.h"
/*
 * Fields of struct smbXcli_conn (the struct's opening line is not visible in
 * this extract; several member lines are missing between the ones shown).
 * Holds one client transport connection shared by SMB1 and SMB2 code paths.
 */
/* local/remote socket endpoints, captured at create time */
36 struct sockaddr_storage local_ss;
37 struct sockaddr_storage remote_ss;
38 const char *remote_name;
/* queue of outgoing PDUs and the array of requests awaiting replies */
40 struct tevent_queue *outgoing;
41 struct tevent_req **pending;
/* the single in-flight read_smb request, if any (see smbXcli_conn_receive_next) */
42 struct tevent_req *read_smb_req;
44 enum protocol_types protocol;
47 bool mandatory_signing;
50 * The incoming dispatch function should return:
51 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
52 * - NT_STATUS_OK, if no more processing is desired, e.g.
53 * the dispatch function called
55 * - All other return values disconnect the connection.
57 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
/* NOTE(review): the following fields appear to belong to nested smb1/smb2
 * client/server sub-structs whose braces were lost in extraction — verify
 * against the full source before editing. */
63 uint32_t capabilities;
68 uint32_t capabilities;
71 uint16_t security_mode;
80 const char *workgroup;
86 uint32_t capabilities;
/* SMB1 signing and (optional) transport encryption state */
91 struct smb_signing_state *signing;
92 struct smb_trans_enc_state *trans_enc;
97 uint16_t security_mode;
102 uint32_t capabilities;
103 uint16_t security_mode;
105 uint32_t max_trans_size;
106 uint32_t max_read_size;
107 uint32_t max_write_size;
/*
 * Per-request state attached to a tevent_req created by smb1cli_req_create().
 * Interior lines are missing from this extract; fields shown are a subset.
 */
117 struct smbXcli_req_state {
118 struct tevent_context *ev;
119 struct smbXcli_conn *conn;
/* 4-byte NBT length prefix sent ahead of the SMB header */
121 uint8_t length_hdr[4];
128 /* Space for the header including the wct */
129 uint8_t hdr[HDR_VWV];
132 * For normal requests, smb1cli_req_send chooses a mid.
133 * SecondaryV trans requests need to use the mid of the primary
134 * request, so we need a place to store it.
135 * Assume it is set if != 0.
/* on-the-wire little-endian byte count (BCC) */
140 uint8_t bytecount_buf[2];
142 #define MAX_SMB_IOV 5
143 /* length_hdr, hdr, words, byte_count, buffers */
144 struct iovec iov[1 + 3 + MAX_SMB_IOV];
/* non-NULL while this request is part of an AndX chain */
148 struct tevent_req **chained_requests;
151 NTSTATUS recv_status;
152 /* always an array of 3 talloc elements */
153 struct iovec *recv_iov;
157 const uint8_t *fixed;
163 uint8_t pad[7]; /* padding space for compounding */
165 /* always an array of 3 talloc elements */
166 struct iovec *recv_iov;
/*
 * talloc destructor for a connection: tear down the socket and free any
 * SMB1 transport-encryption state. Disconnecting with NT_STATUS_OK keeps
 * pending request callbacks from being notified.
 */
170 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
173 * NT_STATUS_OK, means we do not notify the callers
175 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
177 if (conn->smb1.trans_enc) {
178 common_free_encryption_state(&conn->smb1.trans_enc);
/*
 * Allocate and initialize a client connection around an already-connected
 * socket fd. Records both socket endpoints, applies the requested signing
 * policy, seeds SMB1/SMB2 client defaults, and installs the destructor.
 * Returns NULL on allocation/socket failure (error paths are partially
 * missing from this extract).
 */
184 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
186 const char *remote_name,
187 enum smb_signing_setting signing_state,
188 uint32_t smb1_capabilities,
189 struct GUID *client_guid)
191 struct smbXcli_conn *conn = NULL;
193 struct sockaddr *sa = NULL;
197 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
202 conn->remote_name = talloc_strdup(conn, remote_name);
203 if (conn->remote_name == NULL) {
/* capture the local, then the remote socket address */
209 ss = (void *)&conn->local_ss;
210 sa = (struct sockaddr *)ss;
211 sa_length = sizeof(conn->local_ss);
212 ret = getsockname(fd, sa, &sa_length);
216 ss = (void *)&conn->remote_ss;
217 sa = (struct sockaddr *)ss;
218 sa_length = sizeof(conn->remote_ss);
219 ret = getpeername(fd, sa, &sa_length);
224 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
225 if (conn->outgoing == NULL) {
228 conn->pending = NULL;
230 conn->protocol = PROTOCOL_NONE;
/* map the caller's signing policy onto allow/desire/mandatory flags */
232 switch (signing_state) {
233 case SMB_SIGNING_OFF:
235 conn->allow_signing = false;
236 conn->desire_signing = false;
237 conn->mandatory_signing = false;
239 case SMB_SIGNING_DEFAULT:
240 case SMB_SIGNING_IF_REQUIRED:
241 /* if the server requires it */
242 conn->allow_signing = true;
243 conn->desire_signing = false;
244 conn->mandatory_signing = false;
246 case SMB_SIGNING_REQUIRED:
248 conn->allow_signing = true;
249 conn->desire_signing = true;
250 conn->mandatory_signing = true;
254 conn->smb1.client.capabilities = smb1_capabilities;
255 conn->smb1.client.max_xmit = UINT16_MAX;
/* conservative defaults until NEGOTIATE updates them */
257 conn->smb1.capabilities = conn->smb1.client.capabilities;
258 conn->smb1.max_xmit = 1024;
262 /* initialise signing */
263 conn->smb1.signing = smb_signing_init(conn,
265 conn->desire_signing,
266 conn->mandatory_signing);
267 if (!conn->smb1.signing) {
271 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
272 if (conn->mandatory_signing) {
273 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
/* NOTE(review): client_guid appears to be dereferenced unconditionally here —
 * confirm callers never pass NULL. */
276 conn->smb2.client.guid = *client_guid;
279 talloc_set_destructor(conn, smbXcli_conn_destructor);
/*
 * Simple accessor block: connection status, negotiated protocol, and
 * negotiated server properties. SMB2+ connections read from the smb2
 * sub-struct, otherwise the SMB1 values are returned.
 */
287 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
293 if (conn->fd == -1) {
300 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
302 return conn->protocol;
/* SMB2+ always uses unicode; SMB1 only with CAP_UNICODE negotiated */
305 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
307 if (conn->protocol >= PROTOCOL_SMB2_02) {
311 if (conn->smb1.capabilities & CAP_UNICODE) {
318 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
320 set_socket_options(conn->fd, options);
323 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
325 return &conn->local_ss;
328 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
330 return &conn->remote_ss;
333 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
335 return conn->remote_name;
/* for SMB1 this is the server's max_mux; the SMB2 branch body is not
 * visible in this extract */
338 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
340 if (conn->protocol >= PROTOCOL_SMB2_02) {
347 return conn->smb1.server.max_mux;
350 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
352 if (conn->protocol >= PROTOCOL_SMB2_02) {
353 return conn->smb2.server.system_time;
356 return conn->smb1.server.system_time;
359 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
361 if (conn->protocol >= PROTOCOL_SMB2_02) {
362 return &conn->smb2.server.gss_blob;
365 return &conn->smb1.server.gss_blob;
368 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
370 if (conn->protocol >= PROTOCOL_SMB2_02) {
371 return &conn->smb2.server.guid;
374 return &conn->smb1.server.guid;
/* SMB1-only negotiated values below */
377 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
379 return conn->smb1.capabilities;
382 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
384 return conn->smb1.max_xmit;
387 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
389 return conn->smb1.server.session_key;
392 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
394 return conn->smb1.server.challenge;
397 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
399 return conn->smb1.server.security_mode;
402 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
404 return conn->smb1.server.time_zone;
/*
 * SMB1 signing and transport-encryption helpers, plus raw error extraction.
 */
/* Turn on MAC signing once the session key is known. */
407 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
408 const DATA_BLOB user_session_key,
409 const DATA_BLOB response)
411 return smb_signing_activate(conn->smb1.signing,
/* Verify the signature of an incoming PDU against its sequence number. */
416 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
417 const uint8_t *buf, uint32_t seqnum)
419 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
422 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
424 return smb_signing_is_active(conn->smb1.signing);
/* Install new transport encryption state; takes ownership of "es". */
427 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
428 struct smb_trans_enc_state *es)
430 /* Replace the old state, if any. */
431 if (conn->smb1.trans_enc) {
432 common_free_encryption_state(&conn->smb1.trans_enc);
434 conn->smb1.trans_enc = es;
437 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
439 return common_encryption_on(conn->smb1.trans_enc);
/*
 * Pull the error status out of a received SMB1 header. With
 * FLAGS2_32_BIT_ERROR_CODES the field is a 32-bit NTSTATUS, otherwise
 * it is a DOS error class/code pair.
 */
443 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
445 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
446 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
448 if (NT_STATUS_IS_OK(status)) {
452 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
456 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
460 * Is the SMB command able to hold an AND_X successor
461 * @param[in] cmd The SMB command in question
462 * @retval Can we add a chained request after "cmd"?
/* (function body not visible in this extract) */
464 bool smb1cli_is_andx_req(uint8_t cmd)
/*
 * Pick an unused mid for a new SMB1 request: increment conn->smb1.mid,
 * skipping 0 and 0xffff (both reserved), and retry until the candidate
 * collides with no pending request.
 */
484 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
486 size_t num_pending = talloc_array_length(conn->pending);
492 result = conn->smb1.mid++;
493 if ((result == 0) || (result == 0xffff)) {
497 for (i=0; i<num_pending; i++) {
498 if (result == smb1cli_req_mid(conn->pending[i])) {
/* no pending request uses this mid -> take it */
503 if (i == num_pending) {
/*
 * Remove "req" from its connection's pending array. Also clears the
 * request destructor. When the last pending request goes away the
 * read_smb subrequest (a talloc child of conn->pending) dies with the
 * array, removing the socket read fde.
 */
509 void smbXcli_req_unset_pending(struct tevent_req *req)
511 struct smbXcli_req_state *state =
513 struct smbXcli_req_state);
514 struct smbXcli_conn *conn = state->conn;
515 size_t num_pending = talloc_array_length(conn->pending);
518 if (state->smb1.mid != 0) {
520 * This is a [nt]trans[2] request which waits
521 * for more than one reply.
526 talloc_set_destructor(req, NULL);
528 if (num_pending == 1) {
530 * The pending read_smb tevent_req is a child of
531 * conn->pending. So if nothing is pending anymore, we need to
532 * delete the socket read fde.
534 TALLOC_FREE(conn->pending);
535 conn->read_smb_req = NULL;
539 for (i=0; i<num_pending; i++) {
540 if (req == conn->pending[i]) {
544 if (i == num_pending) {
546 * Something's seriously broken. Just returning here is the
547 * right thing nevertheless, the point of this routine is to
548 * remove ourselves from conn->pending.
554 * Remove ourselves from the conn->pending array
556 for (; i < (num_pending - 1); i++) {
557 conn->pending[i] = conn->pending[i+1];
561 * No NULL check here, we're shrinking by sizeof(void *), and
562 * talloc_realloc just adjusts the size for this.
564 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
/*
 * Request destructor: guarantee removal from conn->pending even if the
 * caller frees the request directly.
 */
569 static int smbXcli_req_destructor(struct tevent_req *req)
571 struct smbXcli_req_state *state =
573 struct smbXcli_req_state);
576 * Make sure we really remove it from
577 * the pending array on destruction.
580 smbXcli_req_unset_pending(req);
584 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
/*
 * Append "req" to conn->pending and make sure a socket read is armed.
 * On failure the request is unset again and the whole connection is
 * torn down with NT_STATUS_NO_MEMORY.
 */
586 bool smbXcli_req_set_pending(struct tevent_req *req)
588 struct smbXcli_req_state *state =
590 struct smbXcli_req_state);
591 struct smbXcli_conn *conn;
592 struct tevent_req **pending;
597 if (!smbXcli_conn_is_connected(conn)) {
601 num_pending = talloc_array_length(conn->pending);
603 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
605 if (pending == NULL) {
608 pending[num_pending] = req;
609 conn->pending = pending;
610 talloc_set_destructor(req, smbXcli_req_destructor);
612 if (!smbXcli_conn_receive_next(conn)) {
614 * the caller should notify the current request
616 * And all other pending requests get notified
617 * by smbXcli_conn_disconnect().
619 smbXcli_req_unset_pending(req);
620 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
627 static void smbXcli_conn_received(struct tevent_req *subreq);
/*
 * Arm the next socket read if one is needed: a no-op while a read_smb
 * request is already outstanding or nothing is pending. With no pending
 * requests and exhausted SMB2 message ids, the connection is aborted.
 */
629 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
631 size_t num_pending = talloc_array_length(conn->pending);
632 struct tevent_req *req;
633 struct smbXcli_req_state *state;
635 if (conn->read_smb_req != NULL) {
639 if (num_pending == 0) {
640 if (conn->smb2.mid < UINT64_MAX) {
641 /* no more pending requests, so we are done for now */
646 * If there are no more SMB2 requests possible,
647 * because we are out of message ids,
648 * we need to disconnect.
650 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
654 req = conn->pending[0];
655 state = tevent_req_data(req, struct smbXcli_req_state);
658 * We're the first ones, add the read_smb request that waits for the
659 * answer from the server
661 conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
662 if (conn->read_smb_req == NULL) {
665 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
/*
 * Close the socket and fail every pending request (and every request
 * chained behind one) with "status". NT_STATUS_OK means "silent" teardown:
 * requests are unset but their callers are not notified. Callbacks are
 * deferred because multiple callers may be notified from this stack frame.
 */
669 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
671 if (conn->fd != -1) {
677 * Cancel all pending requests. We do not do a for-loop walking
678 * conn->pending because that array changes in
679 * smbXcli_req_unset_pending.
681 while (talloc_array_length(conn->pending) > 0) {
682 struct tevent_req *req;
683 struct smbXcli_req_state *state;
684 struct tevent_req **chain;
688 req = conn->pending[0];
689 state = tevent_req_data(req, struct smbXcli_req_state);
691 if (state->smb1.chained_requests == NULL) {
693 * We're dead. No point waiting for trans2
698 smbXcli_req_unset_pending(req);
700 if (NT_STATUS_IS_OK(status)) {
701 /* do not notify the callers */
706 * we need to defer the callback, because we may notify
707 * more than one caller.
709 tevent_req_defer_callback(req, state->ev);
710 tevent_req_nterror(req, status);
/* a chained (AndX) request: fail every member of the chain */
714 chain = talloc_move(conn, &state->smb1.chained_requests);
715 num_chained = talloc_array_length(chain);
717 for (i=0; i<num_chained; i++) {
719 state = tevent_req_data(req, struct smbXcli_req_state);
722 * We're dead. No point waiting for trans2
727 smbXcli_req_unset_pending(req);
729 if (NT_STATUS_IS_OK(status)) {
730 /* do not notify the callers */
735 * we need to defer the callback, because we may notify
736 * more than one caller.
738 tevent_req_defer_callback(req, state->ev);
739 tevent_req_nterror(req, status);
746 * Fetch a smb request's mid. Only valid after the request has been sent by
747 * smb1cli_req_send().
/* An explicitly set mid (secondary trans requests) wins over the one
 * already encoded in the header. */
749 uint16_t smb1cli_req_mid(struct tevent_req *req)
751 struct smbXcli_req_state *state =
753 struct smbXcli_req_state);
755 if (state->smb1.mid != 0) {
756 return state->smb1.mid;
759 return SVAL(state->smb1.hdr, HDR_MID);
/* Force a specific mid (used by secondary [nt]trans[2] requests). */
762 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
764 struct smbXcli_req_state *state =
766 struct smbXcli_req_state);
768 state->smb1.mid = mid;
/* Signing sequence number accessors for this request. */
771 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
773 struct smbXcli_req_state *state =
775 struct smbXcli_req_state);
777 return state->smb1.seqnum;
780 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
782 struct smbXcli_req_state *state =
784 struct smbXcli_req_state);
786 state->smb1.seqnum = seqnum;
/*
 * Sum the byte lengths of the first "count" elements of an iovec array.
 */
static size_t smbXcli_iov_len(const struct iovec *iov, int count)
{
	size_t total = 0;
	int idx;

	for (idx = 0; idx < count; idx++) {
		total += iov[idx].iov_len;
	}

	return total;
}
/*
 * Flatten an iovec array into one talloc'ed buffer on "mem_ctx".
 * Caller owns the result; returns NULL on allocation failure
 * (error-path lines not visible in this extract).
 */
799 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
800 const struct iovec *iov,
803 size_t len = smbXcli_iov_len(iov, count);
808 buf = talloc_array(mem_ctx, uint8_t, len);
813 for (i=0; i<count; i++) {
814 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
815 copied += iov[i].iov_len;
/*
 * Compute the SMB1 header FLG/FLG2 fields for an outgoing request from the
 * negotiated protocol level and capabilities, then apply the caller's
 * additional/clear masks on top.
 */
820 static void smb1cli_req_flags(enum protocol_types protocol,
821 uint32_t smb1_capabilities,
823 uint8_t additional_flags,
826 uint16_t additional_flags2,
827 uint16_t clear_flags2,
/* base flags grow with the protocol dialect */
833 if (protocol >= PROTOCOL_LANMAN1) {
834 flags |= FLAG_CASELESS_PATHNAMES;
835 flags |= FLAG_CANONICAL_PATHNAMES;
838 if (protocol >= PROTOCOL_LANMAN2) {
839 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
840 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
843 if (protocol >= PROTOCOL_NT1) {
844 flags2 |= FLAGS2_IS_LONG_NAME;
846 if (smb1_capabilities & CAP_UNICODE) {
847 flags2 |= FLAGS2_UNICODE_STRINGS;
849 if (smb1_capabilities & CAP_STATUS32) {
850 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
852 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
853 flags2 |= FLAGS2_EXTENDED_SECURITY;
/* caller overrides: add first, then clear */
857 flags |= additional_flags;
858 flags &= ~clear_flags;
859 flags2 |= additional_flags2;
860 flags2 &= ~clear_flags2;
/*
 * Build (but do not send) an SMB1 request: fill in the fixed header,
 * assemble the iovec (length prefix, header, vwv words, byte count,
 * caller-supplied byte buffers), set an optional timeout, and mark
 * one-way commands that never get a reply. The mid is assigned later,
 * at submit time.
 */
866 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
867 struct tevent_context *ev,
868 struct smbXcli_conn *conn,
870 uint8_t additional_flags,
872 uint16_t additional_flags2,
873 uint16_t clear_flags2,
874 uint32_t timeout_msec,
878 uint8_t wct, uint16_t *vwv,
880 struct iovec *bytes_iov)
882 struct tevent_req *req;
883 struct smbXcli_req_state *state;
887 if (iov_count > MAX_SMB_IOV) {
889 * Should not happen :-)
894 req = tevent_req_create(mem_ctx, &state,
895 struct smbXcli_req_state);
/* pre-set receive state to "nothing valid received yet" */
902 state->smb1.recv_cmd = 0xFF;
903 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
904 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
905 if (state->smb1.recv_iov == NULL) {
910 smb1cli_req_flags(conn->protocol,
911 conn->smb1.capabilities,
/* fixed SMB1 header layout; HDR_MID is filled in at submit time */
920 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
921 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
922 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
923 SCVAL(state->smb1.hdr, HDR_FLG, flags);
924 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
925 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
926 SSVAL(state->smb1.hdr, HDR_TID, tid);
927 SSVAL(state->smb1.hdr, HDR_PID, pid);
928 SSVAL(state->smb1.hdr, HDR_UID, uid);
929 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
930 SSVAL(state->smb1.hdr, HDR_WCT, wct);
932 state->smb1.vwv = vwv;
934 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
/* iov layout: [0] NBT length, [1] header, [2] words, [3] BCC, [4..] bytes */
936 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
937 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
938 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
939 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
940 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
941 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
942 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
943 state->smb1.iov[3].iov_len = sizeof(uint16_t);
945 if (iov_count != 0) {
946 memcpy(&state->smb1.iov[4], bytes_iov,
947 iov_count * sizeof(*bytes_iov));
949 state->smb1.iov_count = iov_count + 4;
951 if (timeout_msec > 0) {
952 struct timeval endtime;
954 endtime = timeval_current_ofs_msec(timeout_msec);
955 if (!tevent_req_set_endtime(req, ev, endtime)) {
/* some commands never see a reply; oplock-release LockingX is one */
960 switch (smb_command) {
965 state->one_way = true;
969 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
970 state->one_way = true;
/*
 * Sign an outgoing SMB1 PDU given as an iovec. Validates the expected
 * iovec layout, flattens it into one buffer, computes the MAC with the
 * next sequence number, and copies the signed header back into iov[1].
 * Returns the used sequence number in *seqnum.
 */
978 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
979 struct iovec *iov, int iov_count,
985 * Obvious optimization: Make cli_calculate_sign_mac work with struct
986 * iovec directly. MD5Update would do that just fine.
/* layout sanity checks: NBT header, SMB header, <=255 words, 2-byte BCC */
990 return NT_STATUS_INVALID_PARAMETER_MIX;
992 if (iov[0].iov_len != NBT_HDR_SIZE) {
993 return NT_STATUS_INVALID_PARAMETER_MIX;
995 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
996 return NT_STATUS_INVALID_PARAMETER_MIX;
998 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
999 return NT_STATUS_INVALID_PARAMETER_MIX;
1001 if (iov[3].iov_len != sizeof(uint16_t)) {
1002 return NT_STATUS_INVALID_PARAMETER_MIX;
1005 buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1007 return NT_STATUS_NO_MEMORY;
1010 *seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
1011 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
/* copy the now-signed SMB header (after the 4-byte NBT prefix) back */
1012 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
1015 return NT_STATUS_OK;
1018 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1019 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1020 TALLOC_CTX *tmp_mem,
/*
 * Submit an assembled SMB1 request to the wire: validate the iovec
 * layout, assign the mid, set the NBT length, sign the PDU, optionally
 * encrypt it into a single buffer, and queue the writev. Also installs
 * the SMB1 incoming-dispatch handler on first use.
 */
1023 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1024 struct smbXcli_req_state *state,
1025 struct iovec *iov, int iov_count)
1027 struct tevent_req *subreq;
1031 if (!smbXcli_conn_is_connected(state->conn)) {
1032 return NT_STATUS_CONNECTION_DISCONNECTED;
/* refuse to send SMB1 on an SMB2+ connection */
1035 if (state->conn->protocol > PROTOCOL_NT1) {
1036 return NT_STATUS_REVISION_MISMATCH;
1039 if (iov_count < 4) {
1040 return NT_STATUS_INVALID_PARAMETER_MIX;
1042 if (iov[0].iov_len != NBT_HDR_SIZE) {
1043 return NT_STATUS_INVALID_PARAMETER_MIX;
1045 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1046 return NT_STATUS_INVALID_PARAMETER_MIX;
1048 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1049 return NT_STATUS_INVALID_PARAMETER_MIX;
1051 if (iov[3].iov_len != sizeof(uint16_t)) {
1052 return NT_STATUS_INVALID_PARAMETER_MIX;
/* an explicitly set mid (secondary trans) wins over a fresh allocation */
1055 if (state->smb1.mid != 0) {
1056 mid = state->smb1.mid;
1058 mid = smb1cli_alloc_mid(state->conn);
1060 SSVAL(iov[1].iov_base, HDR_MID, mid);
1062 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
1064 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1065 &state->smb1.seqnum);
1067 if (!NT_STATUS_IS_OK(status)) {
1072 * If we supported multiple encryption contexts
1073 * here we'd look up based on tid.
1075 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1076 char *buf, *enc_buf;
1078 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1080 return NT_STATUS_NO_MEMORY;
1082 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1083 (char *)buf, &enc_buf);
1085 if (!NT_STATUS_IS_OK(status)) {
1086 DEBUG(0, ("Error in encrypting client message: %s\n",
1087 nt_errstr(status)));
/* the encrypted PDU replaces the whole iovec with one buffer */
1090 buf = (char *)talloc_memdup(state, enc_buf,
1091 smb_len_nbt(enc_buf)+4);
1094 return NT_STATUS_NO_MEMORY;
1096 iov[0].iov_base = (void *)buf;
1097 iov[0].iov_len = talloc_get_size(buf);
1101 if (state->conn->dispatch_incoming == NULL) {
1102 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1105 subreq = writev_send(state, state->ev, state->conn->outgoing,
1106 state->conn->fd, false, iov, iov_count);
1107 if (subreq == NULL) {
1108 return NT_STATUS_NO_MEMORY;
1110 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1111 return NT_STATUS_OK;
/*
 * Convenience wrapper: create an SMB1 request with a single byte buffer
 * and submit it immediately as a one-element chain. Returns a posted
 * (already-failed) request on error.
 */
1114 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1115 struct tevent_context *ev,
1116 struct smbXcli_conn *conn,
1117 uint8_t smb_command,
1118 uint8_t additional_flags,
1119 uint8_t clear_flags,
1120 uint16_t additional_flags2,
1121 uint16_t clear_flags2,
1122 uint32_t timeout_msec,
1126 uint8_t wct, uint16_t *vwv,
1128 const uint8_t *bytes)
1130 struct tevent_req *req;
1134 iov.iov_base = discard_const_p(void, bytes);
1135 iov.iov_len = num_bytes;
1137 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1138 additional_flags, clear_flags,
1139 additional_flags2, clear_flags2,
/* create() may already have failed the request (e.g. bad timeout) */
1146 if (!tevent_req_is_in_progress(req)) {
1147 return tevent_req_post(req, ev);
1149 status = smb1cli_req_chain_submit(&req, 1);
1150 if (tevent_req_nterror(req, status)) {
1151 return tevent_req_post(req, ev);
/*
 * writev completion: on failure tear down the connection; for one-way
 * commands complete immediately, otherwise register the request as
 * pending so the reply can be matched by mid.
 */
1156 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1158 struct tevent_req *req =
1159 tevent_req_callback_data(subreq,
1161 struct smbXcli_req_state *state =
1162 tevent_req_data(req,
1163 struct smbXcli_req_state);
1167 nwritten = writev_recv(subreq, &err);
1168 TALLOC_FREE(subreq);
1169 if (nwritten == -1) {
1170 NTSTATUS status = map_nt_error_from_unix_common(err);
1171 smbXcli_conn_disconnect(state->conn, status);
1175 if (state->one_way) {
1176 state->inbuf = NULL;
1177 tevent_req_done(req);
1181 if (!smbXcli_req_set_pending(req)) {
1182 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
/*
 * read_smb completion: hand the received PDU to the connection's
 * dispatch function. NT_STATUS_OK means the dispatcher finished a
 * request (no further action); NT_STATUS_RETRY means re-arm the read
 * for the next PDU; anything else disconnects with that status.
 */
1187 static void smbXcli_conn_received(struct tevent_req *subreq)
1189 struct smbXcli_conn *conn =
1190 tevent_req_callback_data(subreq,
1191 struct smbXcli_conn);
1192 TALLOC_CTX *frame = talloc_stackframe();
1198 if (subreq != conn->read_smb_req) {
1199 DEBUG(1, ("Internal error: cli_smb_received called with "
1200 "unexpected subreq\n"));
1201 status = NT_STATUS_INTERNAL_ERROR;
1202 smbXcli_conn_disconnect(conn, status);
1206 conn->read_smb_req = NULL;
1208 received = read_smb_recv(subreq, frame, &inbuf, &err);
1209 TALLOC_FREE(subreq);
1210 if (received == -1) {
1211 status = map_nt_error_from_unix_common(err);
1212 smbXcli_conn_disconnect(conn, status);
1217 status = conn->dispatch_incoming(conn, frame, inbuf);
1219 if (NT_STATUS_IS_OK(status)) {
1221 * We should not do any more processing
1222 * as the dispatch function called
1223 * tevent_req_done().
1226 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1228 * We got an error, so notify all pending requests
1230 smbXcli_conn_disconnect(conn, status);
1235 * We got NT_STATUS_RETRY, so we may ask for a
1236 * next incoming pdu.
1238 if (!smbXcli_conn_receive_next(conn)) {
1239 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
/*
 * Parse a received SMB1 buffer (possibly an AndX chain) into iovecs:
 * element 0 is the header up to WCT, then for each command in the chain
 * one element for the words and one for the bytes. Walks AndX offsets,
 * bounds-checking every WCT/BCC against the remaining buffer, and gives
 * any trailing bytes to the last data element (CAP_LARGE_READX/WRITEX).
 */
1243 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1244 struct iovec **piov, int *pnum_iov)
1255 buflen = smb_len_nbt(buf);
1258 hdr = buf + NBT_HDR_SIZE;
1260 if (buflen < MIN_SMB_SIZE) {
1261 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1265 * This returns iovec elements in the following order:
1280 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1282 return NT_STATUS_NO_MEMORY;
1284 iov[0].iov_base = hdr;
1285 iov[0].iov_len = HDR_WCT;
1288 cmd = CVAL(hdr, HDR_COM);
/* loop over the AndX chain; "taken" tracks consumed bytes */
1292 size_t len = buflen - taken;
1294 struct iovec *iov_tmp;
1301 * we need at least WCT and BCC
1303 needed = sizeof(uint8_t) + sizeof(uint16_t);
1305 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1306 __location__, (int)len, (int)needed));
1311 * Now we check if the specified words are there
1313 wct = CVAL(hdr, wct_ofs);
1314 needed += wct * sizeof(uint16_t);
1316 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1317 __location__, (int)len, (int)needed));
1322 * Now we check if the specified bytes are there
1324 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1325 bcc = SVAL(hdr, bcc_ofs);
1326 needed += bcc * sizeof(uint8_t);
1328 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1329 __location__, (int)len, (int)needed));
1334 * we allocate 2 iovec structures for words and bytes
1336 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1338 if (iov_tmp == NULL) {
1340 return NT_STATUS_NO_MEMORY;
1343 cur = &iov[num_iov];
1346 cur[0].iov_len = wct * sizeof(uint16_t);
1347 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1348 cur[1].iov_len = bcc * sizeof(uint8_t);
1349 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1353 if (!smb1cli_is_andx_req(cmd)) {
1355 * If the current command does not have AndX chaining
1361 if (wct == 0 && bcc == 0) {
1363 * An empty response also ends the chain,
1364 * most likely with an error.
1370 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1371 __location__, (int)wct, (int)cmd));
/* vwv[0] byte 0 is the next AndX command, vwv[1] its offset */
1374 cmd = CVAL(cur[0].iov_base, 0);
1377 * If it is the end of the chain we are also done.
1381 wct_ofs = SVAL(cur[0].iov_base, 2);
/* the next offset must lie inside the buffer, after what we consumed */
1383 if (wct_ofs < taken) {
1384 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1386 if (wct_ofs > buflen) {
1387 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1391 * we consumed everything up to the start of the next
1397 remaining = buflen - taken;
1399 if (remaining > 0 && num_iov >= 3) {
1401 * The last DATA block gets the remaining
1402 * bytes, this is needed to support
1403 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1405 iov[num_iov-1].iov_len += remaining;
1409 *pnum_iov = num_iov;
1410 return NT_STATUS_OK;
1414 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * SMB1 incoming-PDU dispatcher. Optionally decrypts the buffer, matches
 * the reply to a pending request by mid (with special-case handling of
 * mid 0xffff oplock break notifications), verifies the signature,
 * parses the (possibly chained) response, and completes the matching
 * request(s). Returns NT_STATUS_RETRY when the caller should keep
 * reading, NT_STATUS_OK when a request was completed directly.
 */
1417 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1418 TALLOC_CTX *tmp_mem,
1421 struct tevent_req *req;
1422 struct smbXcli_req_state *state;
1429 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1430 struct iovec *iov = NULL;
1432 struct tevent_req **chain = NULL;
1433 size_t num_chained = 0;
1434 size_t num_responses = 0;
1436 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1437 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1438 DEBUG(10, ("Got non-SMB PDU\n"));
1439 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1443 * If we supported multiple encryption contexts
1444 * here we'd look up based on tid.
1446 if (common_encryption_on(conn->smb1.trans_enc)
1447 && (CVAL(inbuf, 0) == 0)) {
1448 uint16_t enc_ctx_num;
1450 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1451 if (!NT_STATUS_IS_OK(status)) {
1452 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1453 nt_errstr(status)));
1457 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1458 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1460 conn->smb1.trans_enc->enc_ctx_num));
1461 return NT_STATUS_INVALID_HANDLE;
1464 status = common_decrypt_buffer(conn->smb1.trans_enc,
1466 if (!NT_STATUS_IS_OK(status)) {
1467 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1468 nt_errstr(status)));
/* match the reply to a pending request via its mid */
1473 mid = SVAL(inhdr, HDR_MID);
1474 num_pending = talloc_array_length(conn->pending);
1476 for (i=0; i<num_pending; i++) {
1477 if (mid == smb1cli_req_mid(conn->pending[i])) {
1481 if (i == num_pending) {
1482 /* Dump unexpected reply */
1483 return NT_STATUS_RETRY;
1486 oplock_break = false;
1488 if (mid == 0xffff) {
1490 * Paranoia checks that this is really an oplock break request.
1492 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1493 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1494 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1495 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1496 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1498 if (!oplock_break) {
1499 /* Dump unexpected reply */
1500 return NT_STATUS_RETRY;
1504 req = conn->pending[i];
1505 state = tevent_req_data(req, struct smbXcli_req_state);
1507 if (!oplock_break /* oplock breaks are not signed */
1508 && !smb_signing_check_pdu(conn->smb1.signing,
1509 inbuf, state->smb1.seqnum+1)) {
1510 DEBUG(10, ("cli_check_sign_mac failed\n"));
1511 return NT_STATUS_ACCESS_DENIED;
1514 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1516 if (!NT_STATUS_IS_OK(status)) {
1517 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1518 nt_errstr(status)));
1522 cmd = CVAL(inhdr, HDR_COM);
1523 status = smb1cli_pull_raw_error(inhdr);
/* single (unchained) request path */
1525 if (state->smb1.chained_requests == NULL) {
1527 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1530 smbXcli_req_unset_pending(req);
1532 state->smb1.recv_cmd = cmd;
1533 state->smb1.recv_status = status;
1534 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1536 state->smb1.recv_iov[0] = iov[0];
1537 state->smb1.recv_iov[1] = iov[1];
1538 state->smb1.recv_iov[2] = iov[2];
1540 if (talloc_array_length(conn->pending) == 0) {
1541 tevent_req_done(req);
1542 return NT_STATUS_OK;
1545 tevent_req_defer_callback(req, state->ev);
1546 tevent_req_done(req);
1547 return NT_STATUS_RETRY;
/* chained (AndX) path: one response pair per chained request */
1550 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1551 num_chained = talloc_array_length(chain);
1552 num_responses = (num_iov - 1)/2;
1554 if (num_responses > num_chained) {
1555 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1558 for (i=0; i<num_chained; i++) {
1559 size_t iov_idx = 1 + (i*2);
1560 struct iovec *cur = &iov[iov_idx];
1564 state = tevent_req_data(req, struct smbXcli_req_state);
1566 smbXcli_req_unset_pending(req);
1569 * as we finish multiple requests here
1570 * we need to defer the callbacks as
1571 * they could destroy our current stack state.
1573 tevent_req_defer_callback(req, state->ev);
1575 if (i >= num_responses) {
1576 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1580 state->smb1.recv_cmd = cmd;
1582 if (i == (num_responses - 1)) {
1584 * The last request in the chain gets the status
1586 state->smb1.recv_status = status;
1588 cmd = CVAL(cur[0].iov_base, 0);
1589 state->smb1.recv_status = NT_STATUS_OK;
1592 state->inbuf = inbuf;
1595 * Note: here we use talloc_reference() in a way
1596 * that does not expose it to the caller.
1598 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1599 if (tevent_req_nomem(inbuf_ref, req)) {
1603 /* copy the related buffers */
1604 state->smb1.recv_iov[0] = iov[0];
1605 state->smb1.recv_iov[1] = cur[0];
1606 state->smb1.recv_iov[2] = cur[1];
1608 tevent_req_done(req);
1611 return NT_STATUS_RETRY;
/*
 * Receive side of an SMB1 request. Unpacks the stored recv_iov into
 * header/vwv/bytes views, then validates the received status and word
 * count against the caller's table of expected responses. All output
 * parameters are optional (NULL-checked). Returns the server status,
 * NT_STATUS_UNEXPECTED_NETWORK_ERROR for an unexpected transport error,
 * or NT_STATUS_INVALID_NETWORK_RESPONSE when the response does not match
 * any expectation.
 */
1614 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1615 TALLOC_CTX *mem_ctx,
1616 struct iovec **piov,
1620 uint32_t *pvwv_offset,
1621 uint32_t *pnum_bytes,
1623 uint32_t *pbytes_offset,
1625 const struct smb1cli_req_expected_response *expected,
1626 size_t num_expected)
1628 struct smbXcli_req_state *state =
1629 tevent_req_data(req,
1630 struct smbXcli_req_state);
1631 NTSTATUS status = NT_STATUS_OK;
1632 struct iovec *recv_iov = NULL;
1633 uint8_t *hdr = NULL;
1635 uint32_t vwv_offset = 0;
1636 uint16_t *vwv = NULL;
1637 uint32_t num_bytes = 0;
1638 uint32_t bytes_offset = 0;
1639 uint8_t *bytes = NULL;
1641 bool found_status = false;
1642 bool found_size = false;
/* pre-initialize all optional out-parameters */
1656 if (pvwv_offset != NULL) {
1659 if (pnum_bytes != NULL) {
1662 if (pbytes != NULL) {
1665 if (pbytes_offset != NULL) {
1668 if (pinbuf != NULL) {
/* decompose recv_iov: [0]=header, [1]=vwv words, [2]=byte buffer */
1672 if (state->inbuf != NULL) {
1673 recv_iov = state->smb1.recv_iov;
1674 hdr = (uint8_t *)recv_iov[0].iov_base;
1675 wct = recv_iov[1].iov_len/2;
1676 vwv = (uint16_t *)recv_iov[1].iov_base;
1677 vwv_offset = PTR_DIFF(vwv, hdr);
1678 num_bytes = recv_iov[2].iov_len;
1679 bytes = (uint8_t *)recv_iov[2].iov_base;
1680 bytes_offset = PTR_DIFF(bytes, hdr);
/* transport-level error: acceptable only if explicitly expected */
1683 if (tevent_req_is_nterror(req, &status)) {
1684 for (i=0; i < num_expected; i++) {
1685 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1686 found_status = true;
1692 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
/* empty expectation table accepts anything */
1698 if (num_expected == 0) {
1699 found_status = true;
1703 status = state->smb1.recv_status;
1705 for (i=0; i < num_expected; i++) {
1706 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1710 found_status = true;
1711 if (expected[i].wct == 0) {
/* wct 0 in the table means "any word count" */
1716 if (expected[i].wct == wct) {
1722 if (!found_status) {
1727 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* hand the iovec (and ownership) to the caller */
1731 *piov = talloc_move(mem_ctx, &recv_iov);
1743 if (pvwv_offset != NULL) {
1744 *pvwv_offset = vwv_offset;
1746 if (pnum_bytes != NULL) {
1747 *pnum_bytes = num_bytes;
1749 if (pbytes != NULL) {
1752 if (pbytes_offset != NULL) {
1753 *pbytes_offset = bytes_offset;
1755 if (pinbuf != NULL) {
1756 *pinbuf = state->inbuf;
/*
 * smb1cli_req_wct_ofs -- compute the offset of the word-count field
 * for the next request appended to an SMB1 AndX chain, by summing the
 * payload lengths (iov entries after header+vwv) of the given requests.
 * The offset is rounded up to a 4-byte boundary after each request.
 * NOTE(review): the initializer and return lines are missing from this
 * extraction; presumably wct_ofs starts at HDR_WCT (as in
 * smb1cli_req_chain_submit) and the accumulated value is returned.
 */
1762 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1769 for (i=0; i<num_reqs; i++) {
1770 struct smbXcli_req_state *state;
1771 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* iov[0]/iov[1] are the NBT and SMB headers; count only the rest. */
1772 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1773 state->smb1.iov_count-2);
/* keep the word-count offset 4-byte aligned */
1774 wct_ofs = (wct_ofs + 3) & ~3;
/*
 * smb1cli_req_chain_submit -- combine several prepared SMB1 requests
 * into one AndX chain and submit them as a single PDU.
 *
 * All requests but the last must be AndX commands with at least two
 * vwv words (for the AndXCommand/AndXOffset fields). The function
 * builds a merged iovec, patches each request's AndX linkage, and
 * hands the result to smb1cli_req_writev_submit() on the first
 * request. Returns an NTSTATUS; on failure the chained_requests
 * bookkeeping is torn down again.
 */
1779 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1781 struct smbXcli_req_state *first_state =
1782 tevent_req_data(reqs[0],
1783 struct smbXcli_req_state);
1784 struct smbXcli_req_state *state;
1786 size_t chain_padding = 0;
1788 struct iovec *iov = NULL;
1789 struct iovec *this_iov;
/* A chain of one is just a plain submit of the prepared iovec. */
1793 if (num_reqs == 1) {
1794 return smb1cli_req_writev_submit(reqs[0], first_state,
1795 first_state->smb1.iov,
1796 first_state->smb1.iov_count);
/* First pass: validate each request and size the merged iovec. */
1800 for (i=0; i<num_reqs; i++) {
1801 if (!tevent_req_is_in_progress(reqs[i])) {
1802 return NT_STATUS_INTERNAL_ERROR;
1805 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* iov must at least contain NBT header, SMB header, vwv, bytes. */
1807 if (state->smb1.iov_count < 4) {
1808 return NT_STATUS_INVALID_PARAMETER_MIX;
1813 * The NBT and SMB header
1826 iovlen += state->smb1.iov_count - 2;
1829 iov = talloc_zero_array(first_state, struct iovec, iovlen);
1831 return NT_STATUS_NO_MEMORY;
/* Remember the whole chain on the first request for dispatch. */
1834 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1835 first_state, reqs, sizeof(*reqs) * num_reqs);
1836 if (first_state->smb1.chained_requests == NULL) {
1838 return NT_STATUS_NO_MEMORY;
1841 wct_offset = HDR_WCT;
/* Second pass: patch AndX linkage and build the merged iovec. */
1844 for (i=0; i<num_reqs; i++) {
1845 size_t next_padding = 0;
1848 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* Every non-final request must be a chainable AndX command. */
1850 if (i < num_reqs-1) {
1851 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
1852 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
1854 TALLOC_FREE(first_state->smb1.chained_requests);
1855 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Advance past this request's payload (+1 for the wct byte) ... */
1859 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
1860 state->smb1.iov_count-2) + 1;
/* ... and pad the next request's wct to a 4-byte boundary. */
1861 if ((wct_offset % 4) != 0) {
1862 next_padding = 4 - (wct_offset % 4);
1864 wct_offset += next_padding;
1865 vwv = state->smb1.vwv;
/* Point AndXCommand/AndXOffset at the following request. */
1867 if (i < num_reqs-1) {
1868 struct smbXcli_req_state *next_state =
1869 tevent_req_data(reqs[i+1],
1870 struct smbXcli_req_state);
1871 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
1873 SSVAL(vwv+1, 0, wct_offset);
1874 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
1875 /* properly end the chain */
1876 SCVAL(vwv+0, 0, 0xff);
1877 SCVAL(vwv+0, 1, 0xff);
1883 * The NBT and SMB header
1885 this_iov[0] = state->smb1.iov[0];
1886 this_iov[1] = state->smb1.iov[1];
1890 * This one is a bit subtle. We have to add
1891 * chain_padding bytes between the requests, and we
1892 * have to also include the wct field of the
1893 * subsequent requests. We use the subsequent header
1894 * for the padding, it contains the wct field in its
1897 this_iov[0].iov_len = chain_padding+1;
1898 this_iov[0].iov_base = (void *)&state->smb1.hdr[
1899 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
1900 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
1905 * copy the words and bytes
1907 memcpy(this_iov, state->smb1.iov+2,
1908 sizeof(struct iovec) * (state->smb1.iov_count-2));
1909 this_iov += state->smb1.iov_count - 2;
1910 chain_padding = next_padding;
/* Reject chains larger than the negotiated max transmit size. */
1913 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
1914 if (nbt_len > first_state->conn->smb1.max_xmit) {
1916 TALLOC_FREE(first_state->smb1.chained_requests);
1917 return NT_STATUS_INVALID_PARAMETER_MIX;
1920 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
1921 if (!NT_STATUS_IS_OK(status)) {
1923 TALLOC_FREE(first_state->smb1.chained_requests);
1927 return NT_STATUS_OK;
1930 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1932 return ((tevent_queue_length(conn->outgoing) != 0)
1933 || (talloc_array_length(conn->pending) != 0));
1936 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
1938 return conn->smb2.server.capabilities;
1941 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
1943 return conn->smb2.server.security_mode;
1946 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
1948 return conn->smb2.server.max_trans_size;
1951 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
1953 return conn->smb2.server.max_read_size;
1956 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
1958 return conn->smb2.server.max_write_size;
/*
 * smb2cli_req_create -- allocate and prepare an SMB2 request without
 * submitting it, so several requests can later be sent as one compound
 * via smb2cli_req_compound_submit().
 *
 * Fills in the fixed 64-byte SMB2 header (magic, opcode, flags,
 * pid/tid/session id) and records the caller's fixed/dyn body buffers.
 * The message id is set to UINT64_MAX here and assigned for real at
 * submit time. Returns the tevent_req, or NULL on allocation failure.
 */
1961 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
1962 struct tevent_context *ev,
1963 struct smbXcli_conn *conn,
1965 uint32_t additional_flags,
1966 uint32_t clear_flags,
1967 uint32_t timeout_msec,
1971 const uint8_t *fixed,
1976 struct tevent_req *req;
1977 struct smbXcli_req_state *state;
1980 req = tevent_req_create(mem_ctx, &state,
1981 struct smbXcli_req_state);
/* Three slots: header, body, dynamic part of the response. */
1989 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
1990 if (state->smb2.recv_iov == NULL) {
/* Caller-requested flag adjustments on top of the defaults. */
1995 flags |= additional_flags;
1996 flags &= ~clear_flags;
/* The caller's buffers are only referenced, not copied. */
1998 state->smb2.fixed = fixed;
1999 state->smb2.fixed_len = fixed_len;
2000 state->smb2.dyn = dyn;
2001 state->smb2.dyn_len = dyn_len;
2003 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2004 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2005 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, 1);
2006 SIVAL(state->smb2.hdr, SMB2_HDR_STATUS, NT_STATUS_V(NT_STATUS_OK));
2007 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
/* NOTE(review): credit request is a fixed 31 here — no dynamic credit
 * management in this version; confirm against later callers. */
2008 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, 31);
2009 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2010 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2011 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2012 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
/* CANCEL requests never get a response we wait for. */
2015 case SMB2_OP_CANCEL:
2016 state->one_way = true;
2020 * If this is a dummy request, it will have
2021 * UINT64_MAX as message id.
2022 * If we send on break acknowledgement,
2023 * this gets overwritten later.
2025 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
2029 if (timeout_msec > 0) {
2030 struct timeval endtime;
2032 endtime = timeval_current_ofs_msec(timeout_msec);
2033 if (!tevent_req_set_endtime(req, ev, endtime)) {
2041 static void smb2cli_writev_done(struct tevent_req *subreq);
2042 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2043 TALLOC_CTX *tmp_mem,
/*
 * smb2cli_req_compound_submit -- send one or more prepared SMB2
 * requests as a single compound PDU.
 *
 * Assigns message ids, chains the requests via the NextCommand header
 * field (8-byte aligned), marks them pending, prefixes the NBT length
 * header and writes everything out asynchronously. The writev
 * completion is delivered to smb2cli_writev_done() on the first
 * request.
 */
2046 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2049 struct smbXcli_req_state *state;
2050 struct tevent_req *subreq;
2052 int i, num_iov, nbt_len;
2055 * 1 for the nbt length
2056 * per request: HDR, fixed, dyn, padding
2057 * -1 because the last one does not need padding
2060 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2062 return NT_STATUS_NO_MEMORY;
2068 for (i=0; i<num_reqs; i++) {
2073 if (!tevent_req_is_in_progress(reqs[i])) {
2074 return NT_STATUS_INTERNAL_ERROR;
2077 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2079 if (!smbXcli_conn_is_connected(state->conn)) {
2080 return NT_STATUS_CONNECTION_DISCONNECTED;
/* PROTOCOL_NONE is allowed so negprot itself can use this path. */
2083 if ((state->conn->protocol != PROTOCOL_NONE) &&
2084 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2085 return NT_STATUS_REVISION_MISMATCH;
/* Message-id space exhausted: the connection is unusable. */
2088 if (state->conn->smb2.mid == UINT64_MAX) {
2089 return NT_STATUS_CONNECTION_ABORTED;
2092 mid = state->conn->smb2.mid;
2093 state->conn->smb2.mid += 1;
2095 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
2097 iov[num_iov].iov_base = state->smb2.hdr;
2098 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2101 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2102 iov[num_iov].iov_len = state->smb2.fixed_len;
2105 if (state->smb2.dyn != NULL) {
2106 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2107 iov[num_iov].iov_len = state->smb2.dyn_len;
2111 reqlen = sizeof(state->smb2.hdr);
2112 reqlen += state->smb2.fixed_len;
2113 reqlen += state->smb2.dyn_len;
/* All but the last request are padded to an 8-byte boundary and
 * linked via the NextCommand offset. */
2115 if (i < num_reqs-1) {
2116 if ((reqlen % 8) > 0) {
2117 uint8_t pad = 8 - (reqlen % 8);
2118 iov[num_iov].iov_base = state->smb2.pad;
2119 iov[num_iov].iov_len = pad;
2123 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2127 ret = smbXcli_req_set_pending(reqs[i]);
2129 return NT_STATUS_NO_MEMORY;
2134 * TODO: Do signing here
2137 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
/* 4-byte NBT framing header carries the total PDU length. */
2138 _smb_setlen_tcp(state->length_hdr, nbt_len);
2139 iov[0].iov_base = state->length_hdr;
2140 iov[0].iov_len = sizeof(state->length_hdr);
2142 if (state->conn->dispatch_incoming == NULL) {
2143 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2146 subreq = writev_send(state, state->ev, state->conn->outgoing,
2147 state->conn->fd, false, iov, num_iov);
2148 if (subreq == NULL) {
2149 return NT_STATUS_NO_MEMORY;
2151 tevent_req_set_callback(subreq, smb2cli_writev_done, reqs[0]);
2152 return NT_STATUS_OK;
/*
 * smb2cli_req_send -- convenience wrapper: create a single SMB2
 * request and submit it immediately as a compound of one.
 * Returns the tevent_req; errors from submission are reported through
 * the request (already posted), not as a NULL return.
 */
2155 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2156 struct tevent_context *ev,
2157 struct smbXcli_conn *conn,
2159 uint32_t additional_flags,
2160 uint32_t clear_flags,
2161 uint32_t timeout_msec,
2165 const uint8_t *fixed,
2170 struct tevent_req *req;
2173 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2174 additional_flags, clear_flags,
2177 fixed, fixed_len, dyn, dyn_len);
/* Creation may already have failed (e.g. endtime): post and return. */
2181 if (!tevent_req_is_in_progress(req)) {
2182 return tevent_req_post(req, ev);
2184 status = smb2cli_req_compound_submit(&req, 1);
2185 if (tevent_req_nterror(req, status)) {
2186 return tevent_req_post(req, ev);
/*
 * smb2cli_writev_done -- completion handler for the async writev that
 * carried an SMB2 compound. On a write error the whole connection is
 * torn down, which fails every pending request with the mapped status.
 */
2191 static void smb2cli_writev_done(struct tevent_req *subreq)
2193 struct tevent_req *req =
2194 tevent_req_callback_data(subreq,
2196 struct smbXcli_req_state *state =
2197 tevent_req_data(req,
2198 struct smbXcli_req_state);
2202 nwritten = writev_recv(subreq, &err);
2203 TALLOC_FREE(subreq);
2204 if (nwritten == -1) {
2205 /* here, we need to notify all pending requests */
2206 NTSTATUS status = map_nt_error_from_unix_common(err);
2207 smbXcli_conn_disconnect(state->conn, status);
/*
 * smb2cli_inbuf_parse_compound -- split a received NBT-framed buffer
 * into the individual SMB2 PDUs of a compound response.
 *
 * For each PDU three iovec entries are produced: [0] the 64-byte
 * header, [1] the (even-sized) body, [2] the remaining dynamic bytes.
 * Validation failures fall through to the
 * NT_STATUS_INVALID_NETWORK_RESPONSE exit at the bottom.
 */
2212 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2213 struct iovec **piov, int *pnum_iov)
2223 iov = talloc_array(mem_ctx, struct iovec, num_iov)ॱ;
2225 return NT_STATUS_NO_MEMORY;
2228 buflen = smb_len_tcp(buf);
2230 first_hdr = buf + NBT_HDR_SIZE;
2232 while (taken < buflen) {
2233 size_t len = buflen - taken;
2234 uint8_t *hdr = first_hdr + taken;
2237 size_t next_command_ofs;
2239 struct iovec *iov_tmp;
2242 * We need the header plus the body length field
2245 if (len < SMB2_HDR_BODY + 2) {
2246 DEBUG(10, ("%d bytes left, expected at least %d\n",
2247 (int)len, SMB2_HDR_BODY));
2250 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2251 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2255 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2256 DEBUG(10, ("Got HDR len %d, expected %d\n",
2257 SVAL(hdr, 4), SMB2_HDR_BODY));
2262 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2263 body_size = SVAL(hdr, SMB2_HDR_BODY);
/* A non-zero NextCommand bounds this PDU and must be sane. */
2265 if (next_command_ofs != 0) {
2266 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2269 if (next_command_ofs > full_size) {
2272 full_size = next_command_ofs;
2274 if (body_size < 2) {
/* The on-wire body size includes the size field itself; the low bit
 * is masked off to get the even structure size. */
2277 body_size &= 0xfffe;
2279 if (body_size > (full_size - SMB2_HDR_BODY)) {
2283 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2285 if (iov_tmp == NULL) {
2287 return NT_STATUS_NO_MEMORY;
2290 cur = &iov[num_iov];
2293 cur[0].iov_base = hdr;
2294 cur[0].iov_len = SMB2_HDR_BODY;
2295 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2296 cur[1].iov_len = body_size;
2297 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2298 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2304 *pnum_iov = num_iov;
2305 return NT_STATUS_OK;
/* shared error exit for all malformed-PDU cases above */
2309 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * smb2cli_conn_find_pending -- look up the pending request whose SMB2
 * message id matches `mid`; used to route an incoming response to its
 * originating tevent_req. Returns NULL when no match exists
 * (NOTE(review): the return statements are outside this extracted view).
 */
2312 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2315 size_t num_pending = talloc_array_length(conn->pending);
2318 for (i=0; i<num_pending; i++) {
2319 struct tevent_req *req = conn->pending[i];
2320 struct smbXcli_req_state *state =
2321 tevent_req_data(req,
2322 struct smbXcli_req_state);
/* The request's own header still holds the mid assigned at submit. */
2324 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * smb2cli_conn_dispatch_incoming -- route a received SMB2 PDU (possibly
 * a compound of several responses) to the matching pending requests.
 *
 * Each response is validated (known mid, matching opcode, server
 * REDIRECT flag). An async STATUS_PENDING interim response only
 * records the async id and keeps the request pending; a final response
 * hands the header/body/dyn iovecs to the request and completes it.
 * Returns NT_STATUS_RETRY to keep reading, NT_STATUS_OK when nothing
 * is pending anymore, or an error to disconnect.
 */
2331 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2332 TALLOC_CTX *tmp_mem,
2335 struct tevent_req *req;
2336 struct smbXcli_req_state *state = NULL;
2342 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2344 if (!NT_STATUS_IS_OK(status)) {
/* Three iovec entries per response: header, body, dynamic. */
2348 for (i=0; i<num_iov; i+=3) {
2349 uint8_t *inbuf_ref = NULL;
2350 struct iovec *cur = &iov[i];
2351 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2352 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2353 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2354 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2355 uint16_t req_opcode;
2357 req = smb2cli_conn_find_pending(conn, mid);
2359 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2361 state = tevent_req_data(req, struct smbXcli_req_state);
/* The response opcode must match what we sent for this mid. */
2363 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2364 if (opcode != req_opcode) {
2365 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* REDIRECT (server-to-redir) must be set on any server response. */
2368 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2369 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2372 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
/* Interim "go async" response: remember the async id and keep the
 * request pending until the real response arrives. */
2373 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2374 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2375 uint32_t req_flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2376 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2378 req_flags |= SMB2_HDR_FLAG_ASYNC;
2379 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2380 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2384 smbXcli_req_unset_pending(req);
2387 * There might be more than one response
2388 * we need to defer the notifications
2390 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
2395 tevent_req_defer_callback(req, state->ev);
2399 * Note: here we use talloc_reference() in a way
2400 * that does not expose it to the caller.
2402 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2403 if (tevent_req_nomem(inbuf_ref, req)) {
2407 /* copy the related buffers */
2408 state->smb2.recv_iov[0] = cur[0];
2409 state->smb2.recv_iov[1] = cur[1];
2410 state->smb2.recv_iov[2] = cur[2];
2412 tevent_req_done(req);
/* More requests outstanding: tell the caller to keep reading. */
2416 return NT_STATUS_RETRY;
2419 return NT_STATUS_OK;
/*
 * smb2cli_req_recv -- collect the result of an async SMB2 request.
 *
 * Validates the received status and body size against the caller's
 * expectations (an empty list accepts anything; body_size 0 acts as a
 * wildcard) and hands the header/body/dyn iovec triple to the caller.
 */
2422 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2423 struct iovec **piov,
2424 const struct smb2cli_req_expected_response *expected,
2425 size_t num_expected)
2427 struct smbXcli_req_state *state =
2428 tevent_req_data(req,
2429 struct smbXcli_req_state);
2432 bool found_status = false;
2433 bool found_size = false;
/* A request-level error passes through only if it was expected. */
2440 if (tevent_req_is_nterror(req, &status)) {
2441 for (i=0; i < num_expected; i++) {
2442 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2443 found_status = true;
2449 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2455 if (num_expected == 0) {
2456 found_status = true;
/* Status and body size come straight from the received header/body. */
2460 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2461 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2463 for (i=0; i < num_expected; i++) {
2464 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2468 found_status = true;
2469 if (expected[i].body_size == 0) {
2474 if (expected[i].body_size == body_size) {
2480 if (!found_status) {
2485 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Transfer ownership of the iovec triple to the caller. */
2489 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
/*
 * SMB1 negprot dialect table: maps each protocol level to the dialect
 * string sent in SMB_COM_NEGOTIATE. Entries are ordered from oldest to
 * newest; several levels have more than one dialect string.
 */
2495 static const struct {
2496 enum protocol_types proto;
2497 const char *smb1_name;
2498 } smb1cli_prots[] = {
2499 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
2500 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
2501 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
2502 {PROTOCOL_LANMAN1, "LANMAN1.0"},
2503 {PROTOCOL_LANMAN2, "LM1.2X002"},
2504 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
2505 {PROTOCOL_LANMAN2, "LANMAN2.1"},
2506 {PROTOCOL_LANMAN2, "Samba"},
2507 {PROTOCOL_NT1, "NT LANMAN 1.0"},
2508 {PROTOCOL_NT1, "NT LM 0.12"},
2509 {PROTOCOL_SMB2_02, "SMB 2.002"},
/*
 * SMB2 negprot dialect table: maps protocol levels to the 16-bit
 * dialect revision codes sent in the SMB2 NEGOTIATE request.
 */
2512 static const struct {
2513 enum protocol_types proto;
2514 uint16_t smb2_dialect;
2515 } smb2cli_prots[] = {
2516 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
/*
 * Per-request state for smbXcli_negprot_send(): the connection,
 * event context, timeout and the caller's protocol range, plus
 * scratch buffers for the SMB2 NEGOTIATE fixed body and the
 * dialect array (2 bytes per possible dialect).
 */
2519 struct smbXcli_negprot_state {
2520 struct smbXcli_conn *conn;
2521 struct tevent_context *ev;
2522 uint32_t timeout_msec;
2523 enum protocol_types min_protocol;
2524 enum protocol_types max_protocol;
2528 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
2532 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
2533 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
2534 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
2535 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
2536 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
2537 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
/*
 * smbXcli_negprot_send -- start protocol negotiation on a connection.
 *
 * Depending on the [min,max] protocol range this sends a pure SMB1
 * negprot, a pure SMB2 negprot, or (when the range spans both) an SMB1
 * negprot carrying the SMB2 dialects, with the incoming dispatcher set
 * to a shim that adapts to whichever protocol the server answers with.
 */
2541 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
2542 struct tevent_context *ev,
2543 struct smbXcli_conn *conn,
2544 uint32_t timeout_msec,
2545 enum protocol_types min_protocol,
2546 enum protocol_types max_protocol)
2548 struct tevent_req *req, *subreq;
2549 struct smbXcli_negprot_state *state;
2551 req = tevent_req_create(mem_ctx, &state,
2552 struct smbXcli_negprot_state);
2558 state->timeout_msec = timeout_msec;
2559 state->min_protocol = min_protocol;
2560 state->max_protocol = max_protocol;
/* Both range ends must be set and ordered. */
2562 if (min_protocol == PROTOCOL_NONE) {
2563 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2564 return tevent_req_post(req, ev);
2567 if (max_protocol == PROTOCOL_NONE) {
2568 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2569 return tevent_req_post(req, ev);
2572 if (min_protocol > max_protocol) {
2573 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2574 return tevent_req_post(req, ev);
/* SMB1-only range: plain SMB1 negprot. */
2577 if ((min_protocol < PROTOCOL_SMB2_02) &&
2578 (max_protocol < PROTOCOL_SMB2_02)) {
2582 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
2584 subreq = smbXcli_negprot_smb1_subreq(state);
2585 if (tevent_req_nomem(subreq, req)) {
2586 return tevent_req_post(req, ev);
2588 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
/* SMB2-only range: plain SMB2 negprot. */
2592 if ((min_protocol >= PROTOCOL_SMB2_02) &&
2593 (max_protocol >= PROTOCOL_SMB2_02)) {
2597 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2599 subreq = smbXcli_negprot_smb2_subreq(state);
2600 if (tevent_req_nomem(subreq, req)) {
2601 return tevent_req_post(req, ev);
2603 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
2608 * We send an SMB1 negprot with the SMB2 dialects
2609 * and expect a SMB1 or a SMB2 response.
2611 * smbXcli_negprot_dispatch_incoming() will fix the
2612 * callback to match protocol of the response.
2614 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
2616 subreq = smbXcli_negprot_smb1_subreq(state);
2617 if (tevent_req_nomem(subreq, req)) {
2618 return tevent_req_post(req, ev);
2620 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
/*
 * smbXcli_negprot_invalid_done -- placeholder callback for the
 * mixed SMB1/SMB2 negprot path. It should never run to completion:
 * smbXcli_negprot_dispatch_incoming() replaces it once the server's
 * protocol is known, so only low-level errors are propagated here.
 */
2624 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
2626 struct tevent_req *req =
2627 tevent_req_callback_data(subreq,
2632 * we just want the low level error
2634 status = tevent_req_simple_recv_ntstatus(subreq);
2635 TALLOC_FREE(subreq);
2636 if (tevent_req_nterror(req, status)) {
2640 /* this should never happen */
2641 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
/*
 * smbXcli_negprot_smb1_subreq -- build and send the SMB1
 * SMB_COM_NEGOTIATE request listing every dialect string within the
 * caller's [min,max] protocol range. Returns the pending request, or
 * NULL on allocation failure.
 */
2644 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
2647 DATA_BLOB bytes = data_blob_null;
2651 /* setup the protocol strings */
2652 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
2656 if (smb1cli_prots[i].proto < state->min_protocol) {
2660 if (smb1cli_prots[i].proto > state->max_protocol) {
/* dialect entries are prefixed with a buffer-format byte */
2664 ok = data_blob_append(state, &bytes, &c, sizeof(c));
2670 * We know it is already ascii and
2671 * we want NULL termination.
2673 ok = data_blob_append(state, &bytes,
2674 smb1cli_prots[i].smb1_name,
2675 strlen(smb1cli_prots[i].smb1_name)+1);
2681 smb1cli_req_flags(state->max_protocol,
2682 state->conn->smb1.client.capabilities,
2687 return smb1cli_req_send(state, state->ev, state->conn,
2691 state->timeout_msec,
2692 0xFFFE, 0, 0, /* pid, tid, uid */
2693 0, NULL, /* wct, vwv */
2694 bytes.length, bytes.data);
/*
 * smbXcli_negprot_smb1_done -- parse the SMB1 negprot response.
 *
 * Maps the server's chosen dialect index back to a protocol level,
 * then decodes the level-specific response body (NT1 with 17 words,
 * LANMAN with 13 words, or bare CORE), negotiates signing, and stores
 * all negotiated parameters into conn->smb1.* before completing the
 * request.
 */
2697 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
2699 struct tevent_req *req =
2700 tevent_req_callback_data(subreq,
2702 struct smbXcli_negprot_state *state =
2703 tevent_req_data(req,
2704 struct smbXcli_negprot_state);
2705 struct smbXcli_conn *conn = state->conn;
2706 struct iovec *recv_iov = NULL;
2715 size_t num_prots = 0;
2717 uint32_t client_capabilities = conn->smb1.client.capabilities;
2718 uint32_t both_capabilities;
2719 uint32_t server_capabilities = 0;
2720 uint32_t capabilities;
2721 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
2722 uint32_t server_max_xmit = 0;
2724 uint32_t server_max_mux = 0;
2725 uint16_t server_security_mode = 0;
2726 uint32_t server_session_key = 0;
2727 bool server_readbraw = false;
2728 bool server_writebraw = false;
2729 bool server_lockread = false;
2730 bool server_writeunlock = false;
2731 struct GUID server_guid = GUID_zero();
2732 DATA_BLOB server_gss_blob = data_blob_null;
2733 uint8_t server_challenge[8];
2734 char *server_workgroup = NULL;
2735 char *server_name = NULL;
2736 int server_time_zone = 0;
2737 NTTIME server_system_time = 0;
/* Accept the three legal response shapes, distinguished by wct. */
2738 static const struct smb1cli_req_expected_response expected[] = {
2740 .status = NT_STATUS_OK,
2741 .wct = 0x11, /* NT1 */
2744 .status = NT_STATUS_OK,
2745 .wct = 0x0D, /* LM */
2748 .status = NT_STATUS_OK,
2749 .wct = 0x01, /* CORE */
2753 ZERO_STRUCT(server_challenge);
2755 status = smb1cli_req_recv(subreq, state,
2760 NULL, /* pvwv_offset */
2763 NULL, /* pbytes_offset */
2765 expected, ARRAY_SIZE(expected));
2766 TALLOC_FREE(subreq);
2767 if (tevent_req_nterror(req, status)) {
2771 flags = CVAL(inhdr, HDR_FLG);
/* vwv[0] is the index of the dialect the server picked. */
2773 protnum = SVAL(vwv, 0);
/* Walk the dialect table in the same order we sent it to find the
 * protocol level matching the returned index. */
2775 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
2776 if (smb1cli_prots[i].proto < state->min_protocol) {
2780 if (smb1cli_prots[i].proto > state->max_protocol) {
2784 if (protnum != num_prots) {
2789 conn->protocol = smb1cli_prots[i].proto;
2793 if (conn->protocol == PROTOCOL_NONE) {
2794 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* Pre-NT1 dialects cannot do SMB signing at all. */
2798 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
2799 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
2800 "and the selected protocol level doesn't support it.\n"));
2801 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
2805 if (flags & FLAG_SUPPORT_LOCKREAD) {
2806 server_lockread = true;
2807 server_writeunlock = true;
/* --- NT1 response body (17 vwv words) --- */
2810 if (conn->protocol >= PROTOCOL_NT1) {
2811 const char *client_signing = NULL;
2812 bool server_mandatory = false;
2813 bool server_allowed = false;
2814 const char *server_signing = NULL;
2819 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
/* NT1 fields are byte-offset within the word array, hence the
 * odd-looking (vwv + N, 1) accessors. */
2824 server_security_mode = CVAL(vwv + 1, 0);
2825 server_max_mux = SVAL(vwv + 1, 1);
2826 server_max_xmit = IVAL(vwv + 3, 1);
2827 server_session_key = IVAL(vwv + 7, 1);
2828 server_time_zone = SVALS(vwv + 15, 1);
2829 server_time_zone *= 60;
2830 /* this time arrives in real GMT */
2831 server_system_time = BVAL(vwv + 11, 1);
2832 server_capabilities = IVAL(vwv + 9, 1);
2834 key_len = CVAL(vwv + 16, 1);
2836 if (server_capabilities & CAP_RAW_MODE) {
2837 server_readbraw = true;
2838 server_writebraw = true;
2840 if (server_capabilities & CAP_LOCK_AND_READ) {
2841 server_lockread = true;
/* Extended security: byte block = 16-byte server GUID + GSS blob. */
2844 if (server_capabilities & CAP_EXTENDED_SECURITY) {
2845 DATA_BLOB blob1, blob2;
2847 if (num_bytes < 16) {
2848 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2852 blob1 = data_blob_const(bytes, 16);
2853 status = GUID_from_data_blob(&blob1, &server_guid);
2854 if (tevent_req_nterror(req, status)) {
2858 blob1 = data_blob_const(bytes+16, num_bytes-16);
2859 blob2 = data_blob_dup_talloc(state, blob1);
2860 if (blob1.length > 0 &&
2861 tevent_req_nomem(blob2.data, req)) {
2864 server_gss_blob = blob2;
/* Non-extended security: challenge + workgroup/server name strings. */
2866 DATA_BLOB blob1, blob2;
2868 if (num_bytes < key_len) {
2869 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2873 if (key_len != 0 && key_len != 8) {
2874 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2879 memcpy(server_challenge, bytes, 8);
2882 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
2883 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
/* First UTF-16 string: the server's workgroup. */
2884 if (blob1.length > 0) {
2887 len = utf16_len_n(blob1.data,
2891 ok = convert_string_talloc(state,
2899 status = map_nt_error_from_unix_common(errno);
2900 tevent_req_nterror(req, status);
2905 blob2.data += blob1.length;
2906 blob2.length -= blob1.length;
/* Second UTF-16 string: the server's name. */
2907 if (blob2.length > 0) {
2910 len = utf16_len_n(blob1.data,
2914 ok = convert_string_talloc(state,
2922 status = map_nt_error_from_unix_common(errno);
2923 tevent_req_nterror(req, status);
/* Negotiate signing: combine the client policy with the server's
 * advertised signing flags. */
2929 client_signing = "disabled";
2930 if (conn->allow_signing) {
2931 client_signing = "allowed";
2933 if (conn->mandatory_signing) {
2934 client_signing = "required";
2937 server_signing = "not supported";
2938 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
2939 server_signing = "supported";
2940 server_allowed = true;
2942 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
2943 server_signing = "required";
2944 server_mandatory = true;
2947 ok = smb_signing_set_negotiated(conn->smb1.signing,
2951 DEBUG(1,("cli_negprot: SMB signing is required, "
2952 "but client[%s] and server[%s] mismatch\n",
2953 client_signing, server_signing));
2954 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
/* --- LANMAN response body (13 vwv words) --- */
2958 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
2964 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2968 server_security_mode = SVAL(vwv + 1, 0);
2969 server_max_xmit = SVAL(vwv + 2, 0);
2970 server_max_mux = SVAL(vwv + 3, 0);
2971 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
2972 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
2973 server_session_key = IVAL(vwv + 6, 0);
2974 server_time_zone = SVALS(vwv + 10, 0);
2975 server_time_zone *= 60;
2976 /* this time is converted to GMT by make_unix_date */
2977 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
2978 unix_to_nt_time(&server_system_time, t);
2979 key_len = SVAL(vwv + 11, 0);
2981 if (num_bytes < key_len) {
2982 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2986 if (key_len != 0 && key_len != 8) {
2987 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2992 memcpy(server_challenge, bytes, 8);
2995 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
2996 if (blob1.length > 0) {
3000 len = utf16_len_n(blob1.data,
3004 ok = convert_string_talloc(state,
3012 status = map_nt_error_from_unix_common(errno);
3013 tevent_req_nterror(req, status);
3019 /* the old core protocol */
3020 server_time_zone = get_time_zone(time(NULL));
3021 server_max_xmit = 1024;
/* Sanity-check the negotiated limits before accepting them. */
3025 if (server_max_xmit < 1024) {
3026 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3030 if (server_max_mux < 1) {
3031 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3036 * Now calculate the negotiated capabilities
3037 * based on the mask for:
3038 * - client only flags
3039 * - flags used in both directions
3040 * - server only flags
3042 both_capabilities = client_capabilities & server_capabilities;
3043 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3044 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3045 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3047 max_xmit = MIN(client_max_xmit, server_max_xmit);
/* Persist everything negotiated on the connection. */
3049 conn->smb1.server.capabilities = server_capabilities;
3050 conn->smb1.capabilities = capabilities;
3052 conn->smb1.server.max_xmit = server_max_xmit;
3053 conn->smb1.max_xmit = max_xmit;
3055 conn->smb1.server.max_mux = server_max_mux;
3057 conn->smb1.server.security_mode = server_security_mode;
3059 conn->smb1.server.readbraw = server_readbraw;
3060 conn->smb1.server.writebraw = server_writebraw;
3061 conn->smb1.server.lockread = server_lockread;
3062 conn->smb1.server.writeunlock = server_writeunlock;
3064 conn->smb1.server.session_key = server_session_key;
3066 talloc_steal(conn, server_gss_blob.data);
3067 conn->smb1.server.gss_blob = server_gss_blob;
3068 conn->smb1.server.guid = server_guid;
3069 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3070 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3071 conn->smb1.server.name = talloc_move(conn, &server_name);
3073 conn->smb1.server.time_zone = server_time_zone;
3074 conn->smb1.server.system_time = server_system_time;
3076 tevent_req_done(req);
/*
 * smbXcli_negprot_smb2_subreq -- build and send the SMB2 NEGOTIATE
 * request: the dialect array (one 16-bit revision code per protocol
 * level within [min,max]) goes into the dynamic part, the fixed body
 * carries dialect count, security mode and zeroed capabilities/GUID.
 */
3079 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3083 uint16_t dialect_count = 0;
3085 buf = state->smb2.dyn;
3086 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3087 if (smb2cli_prots[i].proto < state->min_protocol) {
3091 if (smb2cli_prots[i].proto > state->max_protocol) {
3095 SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
/* Fixed 36-byte NEGOTIATE request body. */
3099 buf = state->smb2.fixed;
3101 SSVAL(buf, 2, dialect_count);
3102 SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3103 SSVAL(buf, 6, 0); /* Reserved */
3104 SSVAL(buf, 8, 0); /* Capabilities */
3105 memset(buf+12, 0, 16); /* ClientGuid */
3106 SBVAL(buf, 28, 0); /* ClientStartTime */
3108 return smb2cli_req_send(state, state->ev,
3109 state->conn, SMB2_OP_NEGPROT,
3111 state->timeout_msec,
3112 0xFEFF, 0, 0, /* pid, tid, uid */
3113 state->smb2.fixed, sizeof(state->smb2.fixed),
3114 state->smb2.dyn, dialect_count*2);
/*
 * smbXcli_negprot_smb2_done -- parse the SMB2 NEGOTIATE response.
 *
 * Maps the returned dialect revision to a protocol level, stores the
 * server's security mode, GUID, size limits, timestamps and GSS blob
 * into conn->smb2.server, validating the security-buffer offset and
 * length against the received iovecs.
 */
3117 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3119 struct tevent_req *req =
3120 tevent_req_callback_data(subreq,
3122 struct smbXcli_negprot_state *state =
3123 tevent_req_data(req,
3124 struct smbXcli_negprot_state);
3125 struct smbXcli_conn *conn = state->conn;
3126 size_t security_offset, security_length;
3132 uint16_t dialect_revision;
3133 static const struct smb2cli_req_expected_response expected[] = {
3135 .status = NT_STATUS_OK,
3140 status = smb2cli_req_recv(subreq, state, &iov,
3141 expected, ARRAY_SIZE(expected));
3142 TALLOC_FREE(subreq);
3143 if (tevent_req_nterror(req, status)) {
3147 body = (uint8_t *)iov[1].iov_base;
3149 dialect_revision = SVAL(body, 4);
/* Find the protocol level matching the server's chosen dialect. */
3151 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3152 if (smb2cli_prots[i].proto < state->min_protocol) {
3156 if (smb2cli_prots[i].proto > state->max_protocol) {
3160 if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3164 conn->protocol = smb2cli_prots[i].proto;
3168 if (conn->protocol == PROTOCOL_NONE) {
3169 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3173 conn->smb2.server.security_mode = SVAL(body, 2);
/* ServerGuid at offset 8 of the response body. */
3175 blob = data_blob_const(body + 8, 16);
3176 status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3177 if (tevent_req_nterror(req, status)) {
3181 conn->smb2.server.capabilities = IVAL(body, 24);
3182 conn->smb2.server.max_trans_size= IVAL(body, 28);
3183 conn->smb2.server.max_read_size = IVAL(body, 32);
3184 conn->smb2.server.max_write_size= IVAL(body, 36);
3185 conn->smb2.server.system_time = BVAL(body, 40);
3186 conn->smb2.server.start_time = BVAL(body, 48);
3188 security_offset = SVAL(body, 56);
3189 security_length = SVAL(body, 58);
/* The security buffer must directly follow header+body and fit in
 * the dynamic iovec. */
3191 if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3192 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3196 if (security_length > iov[2].iov_len) {
3197 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3201 conn->smb2.server.gss_blob = data_blob_talloc(conn,
3204 if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3208 tevent_req_done(req);
/*
 * Temporary incoming-PDU dispatcher installed for the duration of
 * protocol negotiation.
 *
 * Exactly one request (the negprot itself) may be pending. The first
 * four-byte magic of the received PDU (at offset 4 of inbuf) decides
 * whether the server speaks SMB1 or SMB2: the matching completion
 * callback is attached to the pending request, the connection's
 * dispatch_incoming function is permanently switched to the
 * protocol-specific dispatcher, and the PDU is re-dispatched through
 * it. A PDU that is neither SMB1 nor SMB2 fails the negotiation with
 * NT_STATUS_INVALID_NETWORK_RESPONSE.
 */
3211 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3212 TALLOC_CTX *tmp_mem,
3215 size_t num_pending = talloc_array_length(conn->pending);
3216 struct tevent_req *subreq;
3217 struct smbXcli_req_state *substate;
3218 struct tevent_req *req;
3219 struct smbXcli_negprot_state *state;
3220 uint32_t protocol_magic = IVAL(inbuf, 4);
/* During negprot there must be exactly one outstanding request. */
3222 if (num_pending != 1) {
3223 return NT_STATUS_INTERNAL_ERROR;
3226 subreq = conn->pending[0];
3227 substate = tevent_req_data(subreq, struct smbXcli_req_state);
3228 req = tevent_req_callback_data(subreq, struct tevent_req);
3229 state = tevent_req_data(req, struct smbXcli_negprot_state);
3231 switch (protocol_magic) {
/* Server answered with SMB1: route everything through SMB1 from now on. */
3233 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3234 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3235 return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/*
 * Server answered with SMB2: the request was sent as SMB1, so the
 * receive iov may still live in the smb1 slot and must be moved
 * into the smb2 slot before SMB2 dispatch can consume it.
 */
3238 if (substate->smb2.recv_iov == NULL) {
3240 * For the SMB1 negprot we have move it.
3242 substate->smb2.recv_iov = substate->smb1.recv_iov;
3243 substate->smb1.recv_iov = NULL;
3246 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3247 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3248 return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
/* Unknown magic: not an SMB PDU at all. */
3251 DEBUG(10, ("Got non-SMB PDU\n"));
3252 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Collect the result of smbXcli_negprot_send().
 *
 * Returns the negotiation status; all interesting results (negotiated
 * protocol, server capabilities, etc.) have already been stored on the
 * smbXcli_conn by the completion callbacks.
 */
3255 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
3257 return tevent_req_simple_recv_ntstatus(req);
3260 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
3261 uint32_t timeout_msec,
3262 enum protocol_types min_protocol,
3263 enum protocol_types max_protocol)
3265 TALLOC_CTX *frame = talloc_stackframe();
3266 struct tevent_context *ev;
3267 struct tevent_req *req;
3268 NTSTATUS status = NT_STATUS_NO_MEMORY;
3271 if (smbXcli_conn_has_async_calls(conn)) {
3273 * Can't use sync call while an async call is in flight
3275 status = NT_STATUS_INVALID_PARAMETER_MIX;
3278 ev = tevent_context_init(frame);
3282 req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
3283 min_protocol, max_protocol);
3287 ok = tevent_req_poll(req, ev);
3289 status = map_nt_error_from_unix_common(errno);
3292 status = smbXcli_negprot_recv(req);