2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "../libcli/smb/smb_common.h"
28 #include "../libcli/smb/smb_seal.h"
29 #include "../libcli/smb/smb_signing.h"
30 #include "../libcli/smb/read_smb.h"
31 #include "smbXcli_base.h"
32 #include "librpc/ndr/libndr.h"
36 struct sockaddr_storage local_ss;
37 struct sockaddr_storage remote_ss;
38 const char *remote_name;
40 struct tevent_queue *outgoing;
41 struct tevent_req **pending;
42 struct tevent_req *read_smb_req;
44 enum protocol_types protocol;
47 bool mandatory_signing;
50 * The incoming dispatch function should return:
51 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
52 * - NT_STATUS_OK, if no more processing is desired, e.g.
53 * the dispatch function called
55 * - All other return values disconnect the connection.
57 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
63 uint32_t capabilities;
68 uint32_t capabilities;
71 uint16_t security_mode;
80 const char *workgroup;
85 uint32_t capabilities;
90 struct smb_signing_state *signing;
91 struct smb_trans_enc_state *trans_enc;
96 uint16_t security_mode;
100 uint32_t capabilities;
101 uint16_t security_mode;
103 uint32_t max_trans_size;
104 uint32_t max_read_size;
105 uint32_t max_write_size;
115 struct smbXcli_req_state {
116 struct tevent_context *ev;
117 struct smbXcli_conn *conn;
119 uint8_t length_hdr[4];
126 /* Space for the header including the wct */
127 uint8_t hdr[HDR_VWV];
130 * For normal requests, smb1cli_req_send chooses a mid.
131 * SecondaryV trans requests need to use the mid of the primary
132 * request, so we need a place to store it.
133 * Assume it is set if != 0.
138 uint8_t bytecount_buf[2];
140 #define MAX_SMB_IOV 5
141 /* length_hdr, hdr, words, byte_count, buffers */
142 struct iovec iov[1 + 3 + MAX_SMB_IOV];
146 struct tevent_req **chained_requests;
149 NTSTATUS recv_status;
150 /* always an array of 3 talloc elements */
151 struct iovec *recv_iov;
155 const uint8_t *fixed;
161 uint8_t pad[7]; /* padding space for compounding */
163 /* always an array of 3 talloc elements */
164 struct iovec *recv_iov;
/*
 * talloc destructor for struct smbXcli_conn: tear the connection down with
 * NT_STATUS_OK (which, per smbXcli_conn_disconnect, means pending callers
 * are NOT notified) and release any SMB1 transport-encryption state.
 * NOTE(review): this extract is line-sampled; interior lines are elided.
 */
168 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
171 * NT_STATUS_OK, means we do not notify the callers
173 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
175 if (conn->smb1.trans_enc) {
176 common_free_encryption_state(&conn->smb1.trans_enc);
/*
 * Allocate and initialise a client connection object on top of an already
 * connected socket fd: record local/remote sockaddrs, create the outgoing
 * tevent queue, translate the requested signing policy into the three
 * allow/desire/mandatory flags, and seed SMB1/SMB2 negotiation defaults.
 * Returns NULL on allocation failure (error paths elided in this extract).
 * NOTE(review): line-sampled extract; error-handling lines are elided.
 */
182 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
184 const char *remote_name,
185 enum smb_signing_setting signing_state,
186 uint32_t smb1_capabilities)
188 struct smbXcli_conn *conn = NULL;
190 struct sockaddr *sa = NULL;
194 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
199 conn->remote_name = talloc_strdup(conn, remote_name);
200 if (conn->remote_name == NULL) {
/* Capture both endpoint addresses of the connected socket. */
206 ss = (void *)&conn->local_ss;
207 sa = (struct sockaddr *)ss;
208 sa_length = sizeof(conn->local_ss);
209 ret = getsockname(fd, sa, &sa_length);
213 ss = (void *)&conn->remote_ss;
214 sa = (struct sockaddr *)ss;
215 sa_length = sizeof(conn->remote_ss);
216 ret = getpeername(fd, sa, &sa_length);
221 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
222 if (conn->outgoing == NULL) {
225 conn->pending = NULL;
227 conn->protocol = PROTOCOL_NONE;
/* Map the caller's signing policy onto allow/desire/mandatory flags. */
229 switch (signing_state) {
230 case SMB_SIGNING_OFF:
232 conn->allow_signing = false;
233 conn->desire_signing = false;
234 conn->mandatory_signing = false;
236 case SMB_SIGNING_DEFAULT:
237 case SMB_SIGNING_IF_REQUIRED:
238 /* if the server requires it */
239 conn->allow_signing = true;
240 conn->desire_signing = false;
241 conn->mandatory_signing = false;
243 case SMB_SIGNING_REQUIRED:
245 conn->allow_signing = true;
246 conn->desire_signing = true;
247 conn->mandatory_signing = true;
/* SMB1 defaults: conservative 1024 max_xmit until NEGOTIATE completes. */
251 conn->smb1.client.capabilities = smb1_capabilities;
252 conn->smb1.client.max_xmit = UINT16_MAX;
254 conn->smb1.capabilities = conn->smb1.client.capabilities;
255 conn->smb1.max_xmit = 1024;
259 /* initialise signing */
260 conn->smb1.signing = smb_signing_init(conn,
262 conn->desire_signing,
263 conn->mandatory_signing);
264 if (!conn->smb1.signing) {
268 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
269 if (conn->mandatory_signing) {
270 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
273 talloc_set_destructor(conn, smbXcli_conn_destructor);
/* True while the underlying socket fd is valid (-1 marks disconnected). */
281 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
287 if (conn->fd == -1) {
/* Accessor: the protocol negotiated on this connection (PROTOCOL_NONE initially). */
294 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
296 return conn->protocol;
/*
 * Whether strings on the wire should be UTF-16: always for SMB2+,
 * otherwise only if the SMB1 CAP_UNICODE capability was negotiated.
 */
299 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
301 if (conn->protocol >= PROTOCOL_SMB2_02) {
305 if (conn->smb1.capabilities & CAP_UNICODE) {
/* Apply a smb.conf-style "socket options" string to the connection's fd. */
312 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
314 set_socket_options(conn->fd, options);
/* Accessor: local socket address captured at connection creation. */
317 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
319 return &conn->local_ss;
/* Accessor: remote (peer) socket address captured at connection creation. */
322 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
324 return &conn->remote_ss;
/* Accessor: server name given at creation time; owned by the conn talloc ctx. */
327 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
329 return conn->remote_name;
/*
 * Turn on SMB1 packet signing using the session key (and NTLM response,
 * per the SMB1 signing key derivation). Delegates to smb_signing_activate.
 */
332 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
333 const DATA_BLOB user_session_key,
334 const DATA_BLOB response)
336 return smb_signing_activate(conn->smb1.signing,
/* Verify the signature of a received SMB1 PDU against the expected seqnum. */
341 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
342 const uint8_t *buf, uint32_t seqnum)
344 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
/* True once SMB1 signing has been activated on this connection. */
347 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
349 return smb_signing_is_active(conn->smb1.signing);
/*
 * Install (or replace) the SMB1 transport-encryption state. Ownership of
 * `es` transfers to the connection; any previous state is freed first.
 */
352 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
353 struct smb_trans_enc_state *es)
355 /* Replace the old state, if any. */
356 if (conn->smb1.trans_enc) {
357 common_free_encryption_state(&conn->smb1.trans_enc);
359 conn->smb1.trans_enc = es;
/* True if SMB1 transport encryption is currently active on this connection. */
362 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
364 return common_encryption_on(conn->smb1.trans_enc);
/*
 * Extract the error from an SMB1 header: if FLAGS2_32_BIT_ERROR_CODES is
 * set the 4 bytes at HDR_RCLS are a raw NTSTATUS, otherwise they are a
 * DOS error class/code pair which is wrapped via NT_STATUS_DOS().
 * NOTE(review): line-sampled extract; some return paths are elided.
 */
368 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
370 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
371 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
373 if (NT_STATUS_IS_OK(status)) {
377 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
381 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
385 * Figure out if there is an andx command behind the current one
386 * @param[in] buf The smb buffer to look at
387 * @param[in] ofs The offset to the wct field that is followed by the cmd
388 * @retval Is there a command following?
391 static bool smb1cli_have_andx_command(const uint8_t *buf,
/* buf is a talloc'ed buffer, so its length is recoverable via talloc_get_size. */
396 size_t buflen = talloc_get_size(buf);
398 if (!smb1cli_is_andx_req(cmd)) {
/* Boundary check: wct (and the following AndX command byte) must fit. */
402 if ((ofs == buflen-1) || (ofs == buflen)) {
406 wct = CVAL(buf, ofs);
409 * Not enough space for the command and a following pointer
/* 0xff in the AndXCommand field means "no further command in the chain". */
413 return (CVAL(buf, ofs+1) != 0xff);
417 * Is the SMB command able to hold an AND_X successor
418 * @param[in] cmd The SMB command in question
419 * @retval Can we add a chained request after "cmd"?
421 bool smb1cli_is_andx_req(uint8_t cmd)
/*
 * Pick an SMB1 mid not currently used by any pending request. Increments
 * conn->smb1.mid, skipping 0 and 0xffff (both reserved: 0 means "unset",
 * 0xffff is used by oplock-break requests), and rescans the pending array
 * until a free value is found.
 * NOTE(review): line-sampled extract; loop structure lines are elided.
 */
441 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
443 size_t num_pending = talloc_array_length(conn->pending);
449 result = conn->smb1.mid++;
450 if ((result == 0) || (result == 0xffff)) {
454 for (i=0; i<num_pending; i++) {
455 if (result == smb1cli_req_mid(conn->pending[i])) {
460 if (i == num_pending) {
/*
 * Remove `req` from conn->pending. Clears the request's destructor, and
 * when the array becomes empty also frees it, which (as a talloc parent)
 * kills the outstanding read_smb request and thus the socket read fde.
 * NOTE(review): line-sampled extract; some braces/early-returns elided.
 */
466 void smbXcli_req_unset_pending(struct tevent_req *req)
468 struct smbXcli_req_state *state =
470 struct smbXcli_req_state);
471 struct smbXcli_conn *conn = state->conn;
472 size_t num_pending = talloc_array_length(conn->pending);
475 if (state->smb1.mid != 0) {
477 * This is a [nt]trans[2] request which waits
478 * for more than one reply.
483 talloc_set_destructor(req, NULL);
485 if (num_pending == 1) {
487 * The pending read_smb tevent_req is a child of
488 * conn->pending. So if nothing is pending anymore, we need to
489 * delete the socket read fde.
491 TALLOC_FREE(conn->pending);
492 conn->read_smb_req = NULL;
496 for (i=0; i<num_pending; i++) {
497 if (req == conn->pending[i]) {
501 if (i == num_pending) {
503 * Something's seriously broken. Just returning here is the
504 * right thing nevertheless, the point of this routine is to
505 * remove ourselves from conn->pending.
511 * Remove ourselves from the conn->pending array
513 for (; i < (num_pending - 1); i++) {
514 conn->pending[i] = conn->pending[i+1];
518 * No NULL check here, we're shrinking by sizeof(void *), and
519 * talloc_realloc just adjusts the size for this.
521 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
/*
 * talloc destructor installed on pending requests: guarantees the request
 * is removed from conn->pending even if it is freed without completing.
 */
526 static int smbXcli_req_destructor(struct tevent_req *req)
528 struct smbXcli_req_state *state =
530 struct smbXcli_req_state);
533 * Make sure we really remove it from
534 * the pending array on destruction.
537 smbXcli_req_unset_pending(req);
541 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
/*
 * Append `req` to conn->pending (growing the talloc array), install the
 * cleanup destructor, and make sure a read_smb request is outstanding so
 * the reply can be received. On failure the request is unset again and
 * the whole connection is torn down with NT_STATUS_NO_MEMORY.
 * NOTE(review): line-sampled extract; some lines are elided.
 */
543 bool smbXcli_req_set_pending(struct tevent_req *req)
545 struct smbXcli_req_state *state =
547 struct smbXcli_req_state);
548 struct smbXcli_conn *conn;
549 struct tevent_req **pending;
554 if (!smbXcli_conn_is_connected(conn)) {
558 num_pending = talloc_array_length(conn->pending);
560 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
562 if (pending == NULL) {
565 pending[num_pending] = req;
566 conn->pending = pending;
567 talloc_set_destructor(req, smbXcli_req_destructor);
569 if (!smbXcli_conn_receive_next(conn)) {
571 * the caller should notify the current request
573 * And all other pending requests get notified
574 * by smbXcli_conn_disconnect().
576 smbXcli_req_unset_pending(req);
577 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
584 static void smbXcli_conn_received(struct tevent_req *subreq);
/*
 * Ensure exactly one read_smb request is outstanding on the socket.
 * No-op if one already exists or nothing is pending; disconnects when
 * the SMB2 message-id space is exhausted. The read request is parented
 * on conn->pending so it dies automatically when the array is freed.
 * NOTE(review): line-sampled extract; some return lines are elided.
 */
586 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
588 size_t num_pending = talloc_array_length(conn->pending);
589 struct tevent_req *req;
590 struct smbXcli_req_state *state;
592 if (conn->read_smb_req != NULL) {
596 if (num_pending == 0) {
597 if (conn->smb2.mid < UINT64_MAX) {
598 /* no more pending requests, so we are done for now */
603 * If there are no more SMB2 requests possible,
604 * because we are out of message ids,
605 * we need to disconnect.
607 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
611 req = conn->pending[0];
612 state = tevent_req_data(req, struct smbXcli_req_state);
615 * We're the first ones, add the read_smb request that waits for the
616 * answer from the server
618 conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
619 if (conn->read_smb_req == NULL) {
622 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
/*
 * Close the socket and fail every pending request with `status`.
 * NT_STATUS_OK is special-cased to mean "silent" teardown (no callbacks).
 * Iterates via conn->pending[0] rather than a for-loop because
 * smbXcli_req_unset_pending mutates the array; callbacks are deferred so
 * finishing one request cannot re-enter and corrupt this loop's state.
 * NOTE(review): line-sampled extract; some lines are elided.
 */
626 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
628 if (conn->fd != -1) {
634 * Cancel all pending requests. We do not do a for-loop walking
635 * conn->pending because that array changes in
636 * smbXcli_req_unset_pending.
638 while (talloc_array_length(conn->pending) > 0) {
639 struct tevent_req *req;
640 struct smbXcli_req_state *state;
642 req = conn->pending[0];
643 state = tevent_req_data(req, struct smbXcli_req_state);
646 * We're dead. No point waiting for trans2
651 smbXcli_req_unset_pending(req);
653 if (NT_STATUS_IS_OK(status)) {
654 /* do not notify the callers */
659 * we need to defer the callback, because we may notify more
662 tevent_req_defer_callback(req, state->ev);
663 tevent_req_nterror(req, status);
668 * Fetch a smb request's mid. Only valid after the request has been sent by
669 * smb1cli_req_send().
671 uint16_t smb1cli_req_mid(struct tevent_req *req)
673 struct smbXcli_req_state *state =
675 struct smbXcli_req_state);
/* An explicitly-set mid (secondary trans requests) takes precedence. */
677 if (state->smb1.mid != 0) {
678 return state->smb1.mid;
/* Otherwise read the mid that was stamped into the wire header. */
681 return SVAL(state->smb1.hdr, HDR_MID);
/* Force a specific mid (used by secondary trans requests; 0 means unset). */
684 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
686 struct smbXcli_req_state *state =
688 struct smbXcli_req_state);
690 state->smb1.mid = mid;
/* Accessor: the signing sequence number assigned when the request was sent. */
693 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
695 struct smbXcli_req_state *state =
697 struct smbXcli_req_state);
699 return state->smb1.seqnum;
/* Override the signing sequence number (used for multi-reply trans requests). */
702 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
704 struct smbXcli_req_state *state =
706 struct smbXcli_req_state);
708 state->smb1.seqnum = seqnum;
/* Sum of iov_len over `count` iovec entries (total payload byte count). */
711 static size_t smbXcli_iov_len(const struct iovec *iov, int count)
715 for (i=0; i<count; i++) {
716 result += iov[i].iov_len;
/*
 * Flatten an iovec array into one freshly talloc'ed contiguous buffer
 * (used where signing/encryption needs the PDU as a single run of bytes).
 * Returns NULL on allocation failure; caller owns the result.
 */
721 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
722 const struct iovec *iov,
725 size_t len = smbXcli_iov_len(iov, count);
730 buf = talloc_array(mem_ctx, uint8_t, len);
735 for (i=0; i<count; i++) {
736 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
737 copied += iov[i].iov_len;
/*
 * Compute the SMB1 header FLAGS/FLAGS2 fields: start from defaults implied
 * by the negotiated protocol level and capabilities, then apply the
 * caller's additional/clear masks (set bits first, then clear).
 * NOTE(review): line-sampled extract; output-parameter lines are elided.
 */
742 static void smb1cli_req_flags(enum protocol_types protocol,
743 uint32_t smb1_capabilities,
745 uint8_t additional_flags,
748 uint16_t additional_flags2,
749 uint16_t clear_flags2,
755 if (protocol >= PROTOCOL_LANMAN1) {
756 flags |= FLAG_CASELESS_PATHNAMES;
757 flags |= FLAG_CANONICAL_PATHNAMES;
760 if (protocol >= PROTOCOL_LANMAN2) {
761 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
762 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
765 if (protocol >= PROTOCOL_NT1) {
766 flags2 |= FLAGS2_IS_LONG_NAME;
768 if (smb1_capabilities & CAP_UNICODE) {
769 flags2 |= FLAGS2_UNICODE_STRINGS;
771 if (smb1_capabilities & CAP_STATUS32) {
772 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
774 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
775 flags2 |= FLAGS2_EXTENDED_SECURITY;
/* Caller overrides: additions are applied before clears. */
779 flags |= additional_flags;
780 flags &= ~clear_flags;
781 flags2 |= additional_flags2;
782 flags2 &= ~clear_flags2;
/*
 * Build (but do not send) an SMB1 request: allocates the tevent_req/state
 * pair, fills in the fixed 32-byte header plus wct, wires up the iovec
 * array (NBT length, header, vwv words, byte count, caller byte buffers),
 * arms an optional timeout, and marks one-way commands (SMBntcancel and
 * oplock-release SMBlockingX) so no reply will be awaited.
 * The mid is stamped later, at submit time.
 * NOTE(review): line-sampled extract; several parameter and error-path
 * lines are elided.
 */
788 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
789 struct tevent_context *ev,
790 struct smbXcli_conn *conn,
792 uint8_t additional_flags,
794 uint16_t additional_flags2,
795 uint16_t clear_flags2,
796 uint32_t timeout_msec,
800 uint8_t wct, uint16_t *vwv,
802 struct iovec *bytes_iov)
804 struct tevent_req *req;
805 struct smbXcli_req_state *state;
809 if (iov_count > MAX_SMB_IOV) {
811 * Should not happen :-)
816 req = tevent_req_create(mem_ctx, &state,
817 struct smbXcli_req_state);
/* Pre-set receive fields so an early failure path yields sane values. */
824 state->smb1.recv_cmd = 0xFF;
825 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
826 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
827 if (state->smb1.recv_iov == NULL) {
832 smb1cli_req_flags(conn->protocol,
833 conn->smb1.capabilities,
/* Fixed SMB1 header layout: magic, command, status, flags, addressing. */
842 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
843 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
844 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
845 SCVAL(state->smb1.hdr, HDR_FLG, flags);
846 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
847 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
848 SSVAL(state->smb1.hdr, HDR_TID, tid);
849 SSVAL(state->smb1.hdr, HDR_PID, pid);
850 SSVAL(state->smb1.hdr, HDR_UID, uid);
851 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
852 SSVAL(state->smb1.hdr, HDR_WCT, wct);
854 state->smb1.vwv = vwv;
856 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
/* iov[0..3]: NBT length, header, parameter words, byte count; then data. */
858 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
859 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
860 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
861 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
862 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
863 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
864 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
865 state->smb1.iov[3].iov_len = sizeof(uint16_t);
867 if (iov_count != 0) {
868 memcpy(&state->smb1.iov[4], bytes_iov,
869 iov_count * sizeof(*bytes_iov));
871 state->smb1.iov_count = iov_count + 4;
873 if (timeout_msec > 0) {
874 struct timeval endtime;
876 endtime = timeval_current_ofs_msec(timeout_msec);
877 if (!tevent_req_set_endtime(req, ev, endtime)) {
/* One-way commands never get a reply; mark them to skip the wait. */
882 switch (smb_command) {
887 state->one_way = true;
891 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
892 state->one_way = true;
/*
 * Sign an outgoing SMB1 PDU expressed as an iovec array. Validates the
 * expected iovec layout (NBT header / SMB header / vwv / bcc), flattens
 * it to a contiguous buffer, signs that, then copies the now-signed SMB
 * header bytes back into iov[1]. Returns the assigned sequence number
 * via *seqnum.
 * NOTE(review): line-sampled extract; some lines are elided.
 */
900 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
901 struct iovec *iov, int iov_count,
907 * Obvious optimization: Make cli_calculate_sign_mac work with struct
908 * iovec directly. MD5Update would do that just fine.
912 return NT_STATUS_INVALID_PARAMETER_MIX;
914 if (iov[0].iov_len != NBT_HDR_SIZE) {
915 return NT_STATUS_INVALID_PARAMETER_MIX;
917 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
918 return NT_STATUS_INVALID_PARAMETER_MIX;
920 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
921 return NT_STATUS_INVALID_PARAMETER_MIX;
923 if (iov[3].iov_len != sizeof(uint16_t)) {
924 return NT_STATUS_INVALID_PARAMETER_MIX;
927 buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
929 return NT_STATUS_NO_MEMORY;
932 *seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
933 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
/* Copy the signed header (after the 4-byte NBT prefix) back into iov[1]. */
934 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
940 static void smb1cli_req_writev_done(struct tevent_req *subreq);
941 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
/*
 * Submit an assembled SMB1 request to the socket: pick/stamp the mid, set
 * the NBT length, sign the PDU, optionally encrypt it into a single
 * buffer, install the SMB1 dispatcher, and queue an async writev on the
 * connection's outgoing queue. Rejects submission if the connection is
 * down or a >NT1 (SMB2+) protocol was negotiated, and validates the same
 * iovec layout as smb1cli_conn_signv.
 * NOTE(review): line-sampled extract; some lines are elided.
 */
945 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
946 struct smbXcli_req_state *state,
947 struct iovec *iov, int iov_count)
949 struct tevent_req *subreq;
953 if (!smbXcli_conn_is_connected(state->conn)) {
954 return NT_STATUS_CONNECTION_DISCONNECTED;
957 if (state->conn->protocol > PROTOCOL_NT1) {
958 return NT_STATUS_REVISION_MISMATCH;
962 return NT_STATUS_INVALID_PARAMETER_MIX;
964 if (iov[0].iov_len != NBT_HDR_SIZE) {
965 return NT_STATUS_INVALID_PARAMETER_MIX;
967 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
968 return NT_STATUS_INVALID_PARAMETER_MIX;
970 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
971 return NT_STATUS_INVALID_PARAMETER_MIX;
973 if (iov[3].iov_len != sizeof(uint16_t)) {
974 return NT_STATUS_INVALID_PARAMETER_MIX;
/* Use the explicitly-set mid if any, otherwise allocate a fresh one. */
977 if (state->smb1.mid != 0) {
978 mid = state->smb1.mid;
980 mid = smb1cli_alloc_mid(state->conn);
982 SSVAL(iov[1].iov_base, HDR_MID, mid);
/* NBT length covers everything after the 4-byte NBT header. */
984 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
986 status = smb1cli_conn_signv(state->conn, iov, iov_count,
987 &state->smb1.seqnum);
989 if (!NT_STATUS_IS_OK(status)) {
994 * If we supported multiple encrytion contexts
995 * here we'd look up based on tid.
997 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1000 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1002 return NT_STATUS_NO_MEMORY;
1004 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1005 (char *)buf, &enc_buf);
1007 if (!NT_STATUS_IS_OK(status)) {
1008 DEBUG(0, ("Error in encrypting client message: %s\n",
1009 nt_errstr(status)));
/* Keep a state-owned copy; the encrypted PDU replaces the whole iovec. */
1012 buf = (char *)talloc_memdup(state, enc_buf,
1013 smb_len_nbt(enc_buf)+4);
1016 return NT_STATUS_NO_MEMORY;
1018 iov[0].iov_base = (void *)buf;
1019 iov[0].iov_len = talloc_get_size(buf);
/* Lazily bind the SMB1 incoming-PDU dispatcher on first send. */
1023 if (state->conn->dispatch_incoming == NULL) {
1024 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1027 subreq = writev_send(state, state->ev, state->conn->outgoing,
1028 state->conn->fd, false, iov, iov_count);
1029 if (subreq == NULL) {
1030 return NT_STATUS_NO_MEMORY;
1032 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1033 return NT_STATUS_OK;
/*
 * Convenience wrapper: create a single (non-chained) SMB1 request with one
 * byte buffer and immediately submit it via smb1cli_req_chain_submit with
 * a chain length of 1. Errors are posted on the returned tevent_req.
 * NOTE(review): line-sampled extract; some parameter lines are elided.
 */
1036 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1037 struct tevent_context *ev,
1038 struct smbXcli_conn *conn,
1039 uint8_t smb_command,
1040 uint8_t additional_flags,
1041 uint8_t clear_flags,
1042 uint16_t additional_flags2,
1043 uint16_t clear_flags2,
1044 uint32_t timeout_msec,
1048 uint8_t wct, uint16_t *vwv,
1050 const uint8_t *bytes)
1052 struct tevent_req *req;
1056 iov.iov_base = discard_const_p(void, bytes);
1057 iov.iov_len = num_bytes;
1059 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1060 additional_flags, clear_flags,
1061 additional_flags2, clear_flags2,
/* If creation already failed/finished, deliver via the event loop. */
1068 if (!tevent_req_is_in_progress(req)) {
1069 return tevent_req_post(req, ev);
1071 status = smb1cli_req_chain_submit(&req, 1);
1072 if (tevent_req_nterror(req, status)) {
1073 return tevent_req_post(req, ev);
/*
 * writev completion callback: on write failure disconnect the whole
 * connection; for one-way commands finish immediately (no reply comes);
 * otherwise register the request as pending so the reply is dispatched.
 */
1078 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1080 struct tevent_req *req =
1081 tevent_req_callback_data(subreq,
1083 struct smbXcli_req_state *state =
1084 tevent_req_data(req,
1085 struct smbXcli_req_state);
1089 nwritten = writev_recv(subreq, &err);
1090 TALLOC_FREE(subreq);
1091 if (nwritten == -1) {
1092 NTSTATUS status = map_nt_error_from_unix_common(err);
1093 smbXcli_conn_disconnect(state->conn, status);
1097 if (state->one_way) {
1098 state->inbuf = NULL;
1099 tevent_req_done(req);
1103 if (!smbXcli_req_set_pending(req)) {
1104 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
/*
 * read_smb completion callback: receive one PDU and hand it to the
 * protocol-specific dispatcher. Dispatcher contract (see dispatch_incoming
 * comment in the struct): NT_STATUS_OK = fully handled, stop; NT_STATUS_RETRY
 * = re-arm the socket read for the next PDU; anything else = fatal, tear
 * down the connection and notify all pending requests.
 * NOTE(review): line-sampled extract; frame-free/return lines are elided.
 */
1109 static void smbXcli_conn_received(struct tevent_req *subreq)
1111 struct smbXcli_conn *conn =
1112 tevent_req_callback_data(subreq,
1113 struct smbXcli_conn);
1114 TALLOC_CTX *frame = talloc_stackframe();
1120 if (subreq != conn->read_smb_req) {
1121 DEBUG(1, ("Internal error: cli_smb_received called with "
1122 "unexpected subreq\n"));
1123 status = NT_STATUS_INTERNAL_ERROR;
1124 smbXcli_conn_disconnect(conn, status);
1128 conn->read_smb_req = NULL;
1130 received = read_smb_recv(subreq, frame, &inbuf, &err);
1131 TALLOC_FREE(subreq);
1132 if (received == -1) {
1133 status = map_nt_error_from_unix_common(err);
1134 smbXcli_conn_disconnect(conn, status);
1139 status = conn->dispatch_incoming(conn, frame, inbuf);
1141 if (NT_STATUS_IS_OK(status)) {
1143 * We should not do any more processing
1144 * as the dispatch function called
1145 * tevent_req_done().
1148 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1150 * We got an error, so notify all pending requests
1152 smbXcli_conn_disconnect(conn, status);
1157 * We got NT_STATUS_RETRY, so we may ask for a
1158 * next incoming pdu.
1160 if (!smbXcli_conn_receive_next(conn)) {
1161 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
/*
 * Parse a received SMB1 PDU (possibly an AndX chain) into an iovec array:
 * iov[0] = header up to wct, then per chain element a (words, bytes) pair.
 * Walks the chain by following each AndX command/offset pair, bounds-
 * checking wct/bcc against the remaining buffer at every step, and grows
 * the iov array by two entries per element. Any trailing bytes beyond the
 * last element are appended to the final DATA iovec (large read/write
 * support). Returns the array and element count via piov/pnum_iov;
 * NT_STATUS_INVALID_NETWORK_RESPONSE on any malformed structure.
 * NOTE(review): line-sampled extract; several bookkeeping lines (taken /
 * num_iov updates, goto inval) are elided.
 */
1165 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1166 struct iovec **piov, int *pnum_iov)
1177 buflen = smb_len_nbt(buf);
1180 hdr = buf + NBT_HDR_SIZE;
1182 if (buflen < MIN_SMB_SIZE) {
1183 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1187 * This returns iovec elements in the following order:
1202 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1204 return NT_STATUS_NO_MEMORY;
1206 iov[0].iov_base = hdr;
1207 iov[0].iov_len = HDR_WCT;
1210 cmd = CVAL(hdr, HDR_COM);
1214 size_t len = buflen - taken;
1216 struct iovec *iov_tmp;
1223 * we need at least WCT and BCC
1225 needed = sizeof(uint8_t) + sizeof(uint16_t);
1227 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1228 __location__, (int)len, (int)needed));
1233 * Now we check if the specified words are there
1235 wct = CVAL(hdr, wct_ofs);
1236 needed += wct * sizeof(uint16_t);
1238 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1239 __location__, (int)len, (int)needed));
1244 * Now we check if the specified bytes are there
1246 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1247 bcc = SVAL(hdr, bcc_ofs);
1248 needed += bcc * sizeof(uint8_t);
1250 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1251 __location__, (int)len, (int)needed));
1256 * we allocate 2 iovec structures for words and bytes
1258 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1260 if (iov_tmp == NULL) {
1262 return NT_STATUS_NO_MEMORY;
1265 cur = &iov[num_iov];
1268 cur[0].iov_len = wct * sizeof(uint16_t);
1269 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1270 cur[1].iov_len = bcc * sizeof(uint8_t);
1271 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1275 if (!smb1cli_is_andx_req(cmd)) {
1277 * If the current command does not have AndX chanining
1283 if (wct == 0 && bcc == 0) {
1285 * An empty response also ends the chain,
1286 * most likely with an error.
1292 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1293 __location__, (int)wct, (int)cmd));
/* vwv[0] byte 0 = next AndX command; vwv[1] = offset of its wct. */
1296 cmd = CVAL(cur[0].iov_base, 0);
1299 * If it is the end of the chain we are also done.
1303 wct_ofs = SVAL(cur[0].iov_base, 2);
1305 if (wct_ofs < taken) {
1306 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1308 if (wct_ofs > buflen) {
1309 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1313 * we consumed everything up to the start of the next
1319 remaining = buflen - taken;
1321 if (remaining > 0 && num_iov >= 3) {
1323 * The last DATA block gets the remaining
1324 * bytes, this is needed to support
1325 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1327 iov[num_iov-1].iov_len += remaining;
1331 *pnum_iov = num_iov;
1332 return NT_STATUS_OK;
1336 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * SMB1 incoming-PDU dispatcher (installed as conn->dispatch_incoming).
 * Steps: sanity-check the SMB magic, optionally decrypt, match the mid
 * against pending requests (with a paranoid special case for unsolicited
 * mid-0xffff oplock-break SMBlockingX PDUs), verify the signature, parse
 * the (possibly AndX-chained) reply, then complete either the single
 * matching request or every request in the chain. Returns NT_STATUS_RETRY
 * to ask the caller to keep reading when more replies are expected,
 * NT_STATUS_OK when processing is complete, or an error to tear the
 * connection down. Callbacks are deferred whenever more than one request
 * could complete in this invocation.
 * NOTE(review): line-sampled extract; several lines are elided.
 */
1339 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1340 TALLOC_CTX *tmp_mem,
1343 struct tevent_req *req;
1344 struct smbXcli_req_state *state;
1351 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1352 struct iovec *iov = NULL;
1355 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1356 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1357 DEBUG(10, ("Got non-SMB PDU\n"));
1358 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1362 * If we supported multiple encrytion contexts
1363 * here we'd look up based on tid.
1365 if (common_encryption_on(conn->smb1.trans_enc)
1366 && (CVAL(inbuf, 0) == 0)) {
1367 uint16_t enc_ctx_num;
1369 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1370 if (!NT_STATUS_IS_OK(status)) {
1371 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1372 nt_errstr(status)));
1376 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1377 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1379 conn->smb1.trans_enc->enc_ctx_num));
1380 return NT_STATUS_INVALID_HANDLE;
1383 status = common_decrypt_buffer(conn->smb1.trans_enc,
1385 if (!NT_STATUS_IS_OK(status)) {
1386 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1387 nt_errstr(status)));
/* Find the pending request whose mid matches this reply. */
1392 mid = SVAL(inhdr, HDR_MID);
1393 num_pending = talloc_array_length(conn->pending);
1395 for (i=0; i<num_pending; i++) {
1396 if (mid == smb1cli_req_mid(conn->pending[i])) {
1400 if (i == num_pending) {
1401 /* Dump unexpected reply */
1402 return NT_STATUS_RETRY;
1405 oplock_break = false;
1407 if (mid == 0xffff) {
1409 * Paranoia checks that this is really an oplock break request.
1411 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1412 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1413 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1414 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1415 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1417 if (!oplock_break) {
1418 /* Dump unexpected reply */
1419 return NT_STATUS_RETRY;
1423 req = conn->pending[i];
1424 state = tevent_req_data(req, struct smbXcli_req_state);
1426 if (!oplock_break /* oplock breaks are not signed */
1427 && !smb_signing_check_pdu(conn->smb1.signing,
1428 inbuf, state->smb1.seqnum+1)) {
1429 DEBUG(10, ("cli_check_sign_mac failed\n"));
1430 return NT_STATUS_ACCESS_DENIED;
1433 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1435 if (!NT_STATUS_IS_OK(status)) {
1436 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1437 nt_errstr(status)));
1441 cmd = CVAL(inhdr, HDR_COM);
1442 status = smb1cli_pull_raw_error(inhdr);
/* Chained (AndX) case: fan the parsed reply out to each chained request. */
1444 if (state->smb1.chained_requests != NULL) {
1445 struct tevent_req **chain = talloc_move(tmp_mem,
1446 &state->smb1.chained_requests);
1447 size_t num_chained = talloc_array_length(chain);
1448 size_t num_responses = (num_iov - 1)/2;
1450 if (num_responses > num_chained) {
1451 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1454 for (i=0; i<num_chained; i++) {
1455 size_t iov_idx = 1 + (i*2);
1456 struct iovec *cur = &iov[iov_idx];
1460 state = tevent_req_data(req, struct smbXcli_req_state);
1462 smbXcli_req_unset_pending(req);
1465 * as we finish multiple requests here
1466 * we need to defer the callbacks as
1467 * they could destroy our current stack state.
1469 tevent_req_defer_callback(req, state->ev);
1471 if (i >= num_responses) {
1472 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1476 state->smb1.recv_cmd = cmd;
1478 if (i == (num_responses - 1)) {
1480 * The last request in the chain gets the status
1482 state->smb1.recv_status = status;
1484 cmd = CVAL(cur[0].iov_base, 0);
1485 state->smb1.recv_status = NT_STATUS_OK;
1488 state->inbuf = inbuf;
1491 * Note: here we use talloc_reference() in a way
1492 * that does not expose it to the caller.
1494 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1495 if (tevent_req_nomem(inbuf_ref, req)) {
1499 /* copy the related buffers */
1500 state->smb1.recv_iov[0] = iov[0];
1501 state->smb1.recv_iov[1] = cur[0];
1502 state->smb1.recv_iov[2] = cur[1];
1504 tevent_req_done(req);
1506 return NT_STATUS_RETRY;
1510 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Single-request case: hand over the parsed iovecs and finish it. */
1513 smbXcli_req_unset_pending(req);
1515 state->smb1.recv_cmd = cmd;
1516 state->smb1.recv_status = status;
1517 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1519 state->smb1.recv_iov[0] = iov[0];
1520 state->smb1.recv_iov[1] = iov[1];
1521 state->smb1.recv_iov[2] = iov[2];
1523 if (talloc_array_length(conn->pending) == 0) {
1524 tevent_req_done(req);
1525 return NT_STATUS_OK;
1528 tevent_req_defer_callback(req, state->ev);
1529 tevent_req_done(req);
1530 return NT_STATUS_RETRY;
/*
 * Receive side of an SMB1 request: unpack header/vwv/bytes pointers and
 * offsets from the stored recv_iov triple, then validate the result
 * against the caller's table of expected (status, wct) pairs. A tevent
 * error must match an expected status or it is mapped to
 * NT_STATUS_UNEXPECTED_NETWORK_ERROR; an in-band status must match both
 * an expected status and (unless the entry's wct is 0 = "any") the exact
 * word count, else NT_STATUS_INVALID_NETWORK_RESPONSE. Output pointers
 * are all optional (NULL-checked individually).
 * NOTE(review): line-sampled extract; several assignment/brace lines are
 * elided.
 */
1533 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1534 TALLOC_CTX *mem_ctx,
1535 struct iovec **piov,
1539 uint32_t *pvwv_offset,
1540 uint32_t *pnum_bytes,
1542 uint32_t *pbytes_offset,
1544 const struct smb1cli_req_expected_response *expected,
1545 size_t num_expected)
1547 struct smbXcli_req_state *state =
1548 tevent_req_data(req,
1549 struct smbXcli_req_state);
1550 NTSTATUS status = NT_STATUS_OK;
1551 struct iovec *recv_iov = NULL;
1552 uint8_t *hdr = NULL;
1554 uint32_t vwv_offset = 0;
1555 uint16_t *vwv = NULL;
1556 uint32_t num_bytes = 0;
1557 uint32_t bytes_offset = 0;
1558 uint8_t *bytes = NULL;
1560 bool found_status = false;
1561 bool found_size = false;
/* Default every optional output before inspecting the reply. */
1575 if (pvwv_offset != NULL) {
1578 if (pnum_bytes != NULL) {
1581 if (pbytes != NULL) {
1584 if (pbytes_offset != NULL) {
1587 if (pinbuf != NULL) {
/* recv_iov layout: [0]=header, [1]=vwv words, [2]=byte block. */
1591 if (state->inbuf != NULL) {
1592 recv_iov = state->smb1.recv_iov;
1593 hdr = (uint8_t *)recv_iov[0].iov_base;
1594 wct = recv_iov[1].iov_len/2;
1595 vwv = (uint16_t *)recv_iov[1].iov_base;
1596 vwv_offset = PTR_DIFF(vwv, hdr);
1597 num_bytes = recv_iov[2].iov_len;
1598 bytes = (uint8_t *)recv_iov[2].iov_base;
1599 bytes_offset = PTR_DIFF(bytes, hdr);
1602 if (tevent_req_is_nterror(req, &status)) {
1603 for (i=0; i < num_expected; i++) {
1604 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1605 found_status = true;
1611 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
1617 if (num_expected == 0) {
1618 found_status = true;
1622 status = state->smb1.recv_status;
1624 for (i=0; i < num_expected; i++) {
1625 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1629 found_status = true;
1630 if (expected[i].wct == 0) {
1635 if (expected[i].wct == wct) {
1641 if (!found_status) {
1646 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* Hand ownership of the iov triple (and thus the inbuf ref) to mem_ctx. */
1650 *piov = talloc_move(mem_ctx, &recv_iov);
1662 if (pvwv_offset != NULL) {
1663 *pvwv_offset = vwv_offset;
1665 if (pnum_bytes != NULL) {
1666 *pnum_bytes = num_bytes;
1668 if (pbytes != NULL) {
1671 if (pbytes_offset != NULL) {
1672 *pbytes_offset = bytes_offset;
1674 if (pinbuf != NULL) {
1675 *pinbuf = state->inbuf;
/*
 * Compute the wire offset at which the wct (word count) field of a
 * request appended after the given SMB1 AndX chain would land.
 * Per request: add the length of everything after the two header
 * iovecs (iov[2..] = vwv + bytes), then round up to a 4-byte boundary
 * to account for chain padding.
 * NOTE(review): the declarations of wct_ofs/i and the final return are
 * not visible in this extract; presumably wct_ofs starts at the SMB1
 * wct header offset — confirm against the full source.
 */
1681 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1688 for (i=0; i<num_reqs; i++) {
1689 struct smbXcli_req_state *state;
1690 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* payload length of this request: vwv + bytes iovecs only */
1691 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1692 state->smb1.iov_count-2);
/* round the running offset up to the next multiple of 4 */
1693 wct_ofs = (wct_ofs + 3) & ~3;
/*
 * Submit a chain of SMB1 AndX requests as a single PDU.
 *
 * Every request except the last must be an AndX request with wct >= 2;
 * each request's vwv[0]/vwv[1] are patched to carry the next command's
 * opcode and wct offset, and the last AndX request is terminated with
 * the 0xff/0xff end-of-chain marker.  A combined iovec is built that
 * reuses the first request's NBT + SMB header and inserts up to 3
 * padding bytes between requests so each wct field is 4-byte aligned.
 * The resulting vector is handed to smb1cli_req_writev_submit().
 *
 * Returns NT_STATUS_OK on successful submission; callers get their
 * results via the individual tevent_req completions.
 */
1698 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1700 struct smbXcli_req_state *first_state =
1701 tevent_req_data(reqs[0],
1702 struct smbXcli_req_state);
1703 struct smbXcli_req_state *last_state =
1704 tevent_req_data(reqs[num_reqs-1],
1705 struct smbXcli_req_state);
1706 struct smbXcli_req_state *state;
1708 size_t chain_padding = 0;
1710 struct iovec *iov = NULL;
1711 struct iovec *this_iov;
/* a single request needs no chaining, send it as-is */
1715 if (num_reqs == 1) {
1716 return smb1cli_req_writev_submit(reqs[0], first_state,
1717 first_state->smb1.iov,
1718 first_state->smb1.iov_count);
/* validate all requests before touching any of them */
1722 for (i=0; i<num_reqs; i++) {
1723 if (!tevent_req_is_in_progress(reqs[i])) {
1724 return NT_STATUS_INTERNAL_ERROR;
1727 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* iov layout: [0] NBT len, [1] SMB hdr, [2..] vwv/bytes */
1729 if (state->smb1.iov_count < 4) {
1730 return NT_STATUS_INVALID_PARAMETER_MIX;
1735 * The NBT and SMB header
1748 iovlen += state->smb1.iov_count - 2;
1751 iov = talloc_zero_array(last_state, struct iovec, iovlen);
1753 return NT_STATUS_NO_MEMORY;
/* remember the chain so responses can be matched back to it */
1756 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1757 last_state, reqs, sizeof(*reqs) * num_reqs);
1758 if (first_state->smb1.chained_requests == NULL) {
1760 return NT_STATUS_NO_MEMORY;
1763 wct_offset = HDR_WCT;
1766 for (i=0; i<num_reqs; i++) {
1767 size_t next_padding = 0;
1770 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
/* all but the last request must be AndX-capable */
1772 if (i < num_reqs-1) {
1773 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
1774 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
1776 TALLOC_FREE(first_state->smb1.chained_requests);
1777 return NT_STATUS_INVALID_PARAMETER_MIX;
/* advance past this request's payload plus its own wct byte */
1781 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
1782 state->smb1.iov_count-2) + 1;
1783 if ((wct_offset % 4) != 0) {
1784 next_padding = 4 - (wct_offset % 4);
1786 wct_offset += next_padding;
1787 vwv = state->smb1.vwv;
/* patch andx_command (vwv[0]) and andx_offset (vwv[1]) */
1789 if (i < num_reqs-1) {
1790 struct smbXcli_req_state *next_state =
1791 tevent_req_data(reqs[i+1],
1792 struct smbXcli_req_state);
1793 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
1795 SSVAL(vwv+1, 0, wct_offset);
1796 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
1797 /* properly end the chain */
1798 SCVAL(vwv+0, 0, 0xff);
1799 SCVAL(vwv+0, 1, 0xff);
1805 * The NBT and SMB header
1807 this_iov[0] = state->smb1.iov[0];
1808 this_iov[1] = state->smb1.iov[1];
1812 * This one is a bit subtle. We have to add
1813 * chain_padding bytes between the requests, and we
1814 * have to also include the wct field of the
1815 * subsequent requests. We use the subsequent header
1816 * for the padding, it contains the wct field in its
1819 this_iov[0].iov_len = chain_padding+1;
1820 this_iov[0].iov_base = (void *)&state->smb1.hdr[
1821 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
1822 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
1827 * copy the words and bytes
1829 memcpy(this_iov, state->smb1.iov+2,
1830 sizeof(struct iovec) * (state->smb1.iov_count-2));
1831 this_iov += state->smb1.iov_count - 2;
1832 chain_padding = next_padding;
/* the whole chain must fit into the negotiated max_xmit */
1835 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
1836 if (nbt_len > first_state->conn->smb1.max_xmit) {
1838 TALLOC_FREE(first_state->smb1.chained_requests);
1839 return NT_STATUS_INVALID_PARAMETER_MIX;
1842 status = smb1cli_req_writev_submit(reqs[0], last_state, iov, iovlen);
1843 if (!NT_STATUS_IS_OK(status)) {
1845 TALLOC_FREE(first_state->smb1.chained_requests);
/* all requests in the chain share the last one's signing seqnum */
1849 for (i=0; i < (num_reqs - 1); i++) {
1850 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1852 state->smb1.seqnum = last_state->smb1.seqnum;
1855 return NT_STATUS_OK;
1858 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1860 return ((tevent_queue_length(conn->outgoing) != 0)
1861 || (talloc_array_length(conn->pending) != 0));
/*
 * Create (but do not yet send) an SMB2 request as a tevent_req.
 *
 * Fills in the static parts of the SMB2 header: protocol magic, header
 * length, credit charge/request, opcode, flags (additional_flags set,
 * clear_flags masked out), pid, tid and session id.  The message id is
 * assigned later at submit time; here only one-way requests get the
 * UINT64_MAX placeholder.  An optional endtime is armed from
 * timeout_msec.
 *
 * NOTE(review): several parameter lines (cmd, pid, tid, uid, fixed_len,
 * dyn, dyn_len) and the final "return req" are not visible in this
 * extract — confirm against the full source.
 */
1864 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
1865 struct tevent_context *ev,
1866 struct smbXcli_conn *conn,
1868 uint32_t additional_flags,
1869 uint32_t clear_flags,
1870 uint32_t timeout_msec,
1874 const uint8_t *fixed,
1879 struct tevent_req *req;
1880 struct smbXcli_req_state *state;
1883 req = tevent_req_create(mem_ctx, &state,
1884 struct smbXcli_req_state);
/* pre-allocate the 3 iovecs (hdr, body, dyn) for the response */
1892 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
1893 if (state->smb2.recv_iov == NULL) {
1898 flags |= additional_flags;
1899 flags &= ~clear_flags;
/* fixed and dyn buffers are borrowed from the caller, not copied */
1901 state->smb2.fixed = fixed;
1902 state->smb2.fixed_len = fixed_len;
1903 state->smb2.dyn = dyn;
1904 state->smb2.dyn_len = dyn_len;
1906 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
1907 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
1908 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, 1);
1909 SIVAL(state->smb2.hdr, SMB2_HDR_STATUS, NT_STATUS_V(NT_STATUS_OK));
1910 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
1911 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, 31);
1912 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
1913 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
1914 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
1915 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
/* cancel requests never get a response matched by mid */
1918 case SMB2_OP_CANCEL:
1919 state->one_way = true;
/* presumably the SMB2_OP_BREAK case — the case label is not visible here */
1923 * If this is a dummy request, it will have
1924 * UINT64_MAX as message id.
1925 * If we send on break acknowledgement,
1926 * this gets overwritten later.
1928 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
1932 if (timeout_msec > 0) {
1933 struct timeval endtime;
1935 endtime = timeval_current_ofs_msec(timeout_msec);
1936 if (!tevent_req_set_endtime(req, ev, endtime)) {
1944 static void smb2cli_writev_done(struct tevent_req *subreq);
1945 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1946 TALLOC_CTX *tmp_mem,
/*
 * Submit one or more already-created SMB2 requests as a single
 * compound write on the wire.
 *
 * Builds an iovec array: slot 0 carries the NBT/TCP length header,
 * then per request the SMB2 header, the fixed body, the optional
 * dynamic body, and 8-byte alignment padding between (but not after)
 * requests.  Message ids are assigned sequentially from
 * conn->smb2.mid, requests are linked via SMB2_HDR_NEXT_COMMAND, and
 * the whole vector is handed to writev_send() with
 * smb2cli_writev_done as completion callback.
 */
1949 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
1952 struct smbXcli_req_state *state;
1953 struct tevent_req *subreq;
1955 int i, num_iov, nbt_len;
1958 * 1 for the nbt length
1959 * per request: HDR, fixed, dyn, padding
1960 * -1 because the last one does not need padding
1963 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
1965 return NT_STATUS_NO_MEMORY;
1971 for (i=0; i<num_reqs; i++) {
1976 if (!tevent_req_is_in_progress(reqs[i])) {
1977 return NT_STATUS_INTERNAL_ERROR;
1980 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1982 if (!smbXcli_conn_is_connected(state->conn)) {
1983 return NT_STATUS_CONNECTION_DISCONNECTED;
/* protocol must already be negotiated as SMB2 (or still unknown) */
1986 if ((state->conn->protocol != PROTOCOL_NONE) &&
1987 (state->conn->protocol < PROTOCOL_SMB2_02)) {
1988 return NT_STATUS_REVISION_MISMATCH;
/* UINT64_MAX is reserved; running out of mids aborts the connection */
1991 if (state->conn->smb2.mid == UINT64_MAX) {
1992 return NT_STATUS_CONNECTION_ABORTED;
1995 mid = state->conn->smb2.mid;
1996 state->conn->smb2.mid += 1;
1998 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
2000 iov[num_iov].iov_base = state->smb2.hdr;
2001 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2004 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2005 iov[num_iov].iov_len = state->smb2.fixed_len;
2008 if (state->smb2.dyn != NULL) {
2009 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2010 iov[num_iov].iov_len = state->smb2.dyn_len;
2014 reqlen = sizeof(state->smb2.hdr);
2015 reqlen += state->smb2.fixed_len;
2016 reqlen += state->smb2.dyn_len;
/* pad every request but the last to an 8-byte boundary */
2018 if (i < num_reqs-1) {
2019 if ((reqlen % 8) > 0) {
2020 uint8_t pad = 8 - (reqlen % 8);
2021 iov[num_iov].iov_base = state->smb2.pad;
2022 iov[num_iov].iov_len = pad;
/* chain the compound: offset from this header to the next one */
2026 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2030 ret = smbXcli_req_set_pending(reqs[i]);
2032 return NT_STATUS_NO_MEMORY;
2037 * TODO: Do signing here
2040 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2041 _smb_setlen_tcp(state->length_hdr, nbt_len);
2042 iov[0].iov_base = state->length_hdr;
2043 iov[0].iov_len = sizeof(state->length_hdr);
/* first SMB2 send on this conn installs the SMB2 dispatcher */
2045 if (state->conn->dispatch_incoming == NULL) {
2046 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2049 subreq = writev_send(state, state->ev, state->conn->outgoing,
2050 state->conn->fd, false, iov, num_iov);
2051 if (subreq == NULL) {
2052 return NT_STATUS_NO_MEMORY;
2054 tevent_req_set_callback(subreq, smb2cli_writev_done, reqs[0]);
2055 return NT_STATUS_OK;
/*
 * Convenience wrapper: create a single SMB2 request and immediately
 * submit it as a one-element compound.  Returns a tevent_req to be
 * finished via smb2cli_req_recv().
 * NOTE(review): the cmd/pid/tid/uid/len parameter lines and the final
 * "return req" are not visible in this extract.
 */
2058 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2059 struct tevent_context *ev,
2060 struct smbXcli_conn *conn,
2062 uint32_t additional_flags,
2063 uint32_t clear_flags,
2064 uint32_t timeout_msec,
2068 const uint8_t *fixed,
2073 struct tevent_req *req;
2076 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2077 additional_flags, clear_flags,
2080 fixed, fixed_len, dyn, dyn_len);
/* creation may already have failed (e.g. OOM/timeout setup) */
2084 if (!tevent_req_is_in_progress(req)) {
2085 return tevent_req_post(req, ev);
2087 status = smb2cli_req_compound_submit(&req, 1);
2088 if (tevent_req_nterror(req, status)) {
2089 return tevent_req_post(req, ev);
/*
 * Completion callback for the writev of a (possibly compound) SMB2
 * request.  On write failure the errno is mapped to NTSTATUS and the
 * whole connection is disconnected, which notifies every pending
 * request — not just the one that carried this write.
 */
2094 static void smb2cli_writev_done(struct tevent_req *subreq)
2096 struct tevent_req *req =
2097 tevent_req_callback_data(subreq,
2099 struct smbXcli_req_state *state =
2100 tevent_req_data(req,
2101 struct smbXcli_req_state);
2105 nwritten = writev_recv(subreq, &err);
2106 TALLOC_FREE(subreq);
2107 if (nwritten == -1) {
2108 /* here, we need to notify all pending requests */
2109 NTSTATUS status = map_nt_error_from_unix_common(err);
2110 smbXcli_conn_disconnect(state->conn, status);
/*
 * Split a received (possibly compound) SMB2 PDU into per-response
 * iovec triples: [0] the SMB2_HDR_BODY-byte header, [1] the fixed body
 * as announced by the body-size field (rounded down to even), [2] the
 * remaining dynamic bytes up to the next command offset.
 *
 * All length fields are validated against the actual buffer; any
 * inconsistency jumps to the shared error exit returning
 * NT_STATUS_INVALID_NETWORK_RESPONSE.
 */
2115 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2116 struct iovec **piov, int *pnum_iov)
2126 iov = talloc_array(mem_ctx, struct iovec, num_iov);
2128 return NT_STATUS_NO_MEMORY;
/* buf starts with the 4-byte NBT/TCP length header */
2131 buflen = smb_len_tcp(buf);
2133 first_hdr = buf + NBT_HDR_SIZE;
2135 while (taken < buflen) {
2136 size_t len = buflen - taken;
2137 uint8_t *hdr = first_hdr + taken;
2140 size_t next_command_ofs;
2142 struct iovec *iov_tmp;
2145 * We need the header plus the body length field
2148 if (len < SMB2_HDR_BODY + 2) {
2149 DEBUG(10, ("%d bytes left, expected at least %d\n",
2150 (int)len, SMB2_HDR_BODY));
2153 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2154 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2158 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2159 DEBUG(10, ("Got HDR len %d, expected %d\n",
2160 SVAL(hdr, 4), SMB2_HDR_BODY));
2165 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2166 body_size = SVAL(hdr, SMB2_HDR_BODY);
/* a chained response fixes this PDU's full size */
2168 if (next_command_ofs != 0) {
2169 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2172 if (next_command_ofs > full_size) {
2175 full_size = next_command_ofs;
2177 if (body_size < 2) {
/* low bit of the body size field is ignored per SMB2 */
2180 body_size &= 0xfffe;
2182 if (body_size > (full_size - SMB2_HDR_BODY)) {
2186 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2188 if (iov_tmp == NULL) {
2190 return NT_STATUS_NO_MEMORY;
2193 cur = &iov[num_iov];
2196 cur[0].iov_base = hdr;
2197 cur[0].iov_len = SMB2_HDR_BODY;
2198 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2199 cur[1].iov_len = body_size;
2200 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2201 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2207 *pnum_iov = num_iov;
2208 return NT_STATUS_OK;
/* shared error exit for all malformed-length cases above */
2212 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/*
 * Look up the pending request whose SMB2 message id matches mid by
 * scanning conn->pending.  Presumably returns the matching req, or
 * NULL if none matches — the return statements are not visible in
 * this extract.
 */
2215 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2218 size_t num_pending = talloc_array_length(conn->pending);
2221 for (i=0; i<num_pending; i++) {
2222 struct tevent_req *req = conn->pending[i];
2223 struct smbXcli_req_state *state =
2224 tevent_req_data(req,
2225 struct smbXcli_req_state);
2227 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
/*
 * Dispatch an incoming SMB2 PDU (possibly a compound of several
 * responses) to the pending requests it answers.
 *
 * For each header/body/dyn triple: match the pending request by
 * message id, verify the opcode matches what was sent and that the
 * SERVER_TO_REDIR flag is set.  An async interim response
 * (STATUS_PENDING with the ASYNC flag) only records the async id on
 * the stored request header and leaves the request pending; otherwise
 * the request is removed from the pending array, the inbuf is kept
 * alive via talloc_reference on the recv_iov and the request is
 * completed.
 *
 * Per the dispatch contract (see the struct comment at the top of this
 * file) NT_STATUS_RETRY signals that more incoming PDUs are expected.
 */
2234 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2235 TALLOC_CTX *tmp_mem,
2238 struct tevent_req *req;
2239 struct smbXcli_req_state *state = NULL;
2245 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2247 if (!NT_STATUS_IS_OK(status)) {
/* iterate header/body/dyn triples */
2251 for (i=0; i<num_iov; i+=3) {
2252 uint8_t *inbuf_ref = NULL;
2253 struct iovec *cur = &iov[i];
2254 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2255 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2256 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2257 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2258 uint16_t req_opcode;
2260 req = smb2cli_conn_find_pending(conn, mid);
2262 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2264 state = tevent_req_data(req, struct smbXcli_req_state);
/* the response opcode must match the request we sent */
2266 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2267 if (opcode != req_opcode) {
2268 return NT_STATUS_INVALID_NETWORK_RESPONSE;
/* must be a server->client response, not a request */
2271 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2272 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2275 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
/* async interim response: remember the async id, keep waiting */
2276 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2277 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2278 uint32_t req_flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2279 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2281 req_flags |= SMB2_HDR_FLAG_ASYNC;
2282 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2283 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2287 smbXcli_req_unset_pending(req);
2290 * There might be more than one response
2291 * we need to defer the notifications
2293 if ((num_iov == 4) && (talloc_array_length(conn->pending) == 0)) {
2298 tevent_req_defer_callback(req, state->ev);
2302 * Note: here we use talloc_reference() in a way
2303 * that does not expose it to the caller.
2305 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2306 if (tevent_req_nomem(inbuf_ref, req)) {
2310 /* copy the related buffers */
2311 state->smb2.recv_iov[0] = cur[0];
2312 state->smb2.recv_iov[1] = cur[1];
2313 state->smb2.recv_iov[2] = cur[2];
2315 tevent_req_done(req);
2319 return NT_STATUS_RETRY;
2322 return NT_STATUS_OK;
2325 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2326 struct iovec **piov,
2327 const struct smb2cli_req_expected_response *expected,
2328 size_t num_expected)
2330 struct smbXcli_req_state *state =
2331 tevent_req_data(req,
2332 struct smbXcli_req_state);
2335 bool found_status = false;
2336 bool found_size = false;
2343 if (tevent_req_is_nterror(req, &status)) {
2344 for (i=0; i < num_expected; i++) {
2345 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2346 found_status = true;
2352 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2358 if (num_expected == 0) {
2359 found_status = true;
2363 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2364 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2366 for (i=0; i < num_expected; i++) {
2367 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2371 found_status = true;
2372 if (expected[i].body_size == 0) {
2377 if (expected[i].body_size == body_size) {
2383 if (!found_status) {
2388 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2392 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);