2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
6 Copyright (C) Jeremy Allison 2010
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "system/network.h"
26 #endif /* HAVE_LIBURING */
27 #include "smbd/smbd.h"
28 #include "smbd/globals.h"
29 #include "../libcli/smb/smb_common.h"
30 #include "../lib/tsocket/tsocket.h"
31 #include "../lib/util/tevent_ntstatus.h"
32 #include "smbprofile.h"
33 #include "../lib/util/bitmap.h"
34 #include "../librpc/gen_ndr/krb5pac.h"
35 #include "lib/util/iov_buf.h"
37 #include "libcli/smb/smbXcli_base.h"
38 #include "system/threads.h"
41 /* SIOCOUTQ TIOCOUTQ are the same */
42 #define __IOCTL_SEND_QUEUE_SIZE_OPCODE TIOCOUTQ
43 #define __HAVE_TCP_INFO_RTO 1
44 #define __ALLOW_MULTI_CHANNEL_SUPPORT 1
45 #elif defined(FREEBSD)
46 #define __IOCTL_SEND_QUEUE_SIZE_OPCODE FIONWRITE
47 #define __HAVE_TCP_INFO_RTO 1
48 #define __ALLOW_MULTI_CHANNEL_SUPPORT 1
51 #include "lib/crypto/gnutls_helpers.h"
52 #include <gnutls/gnutls.h>
53 #include <gnutls/crypto.h>
56 #define DBGC_CLASS DBGC_SMB2
58 static void smbd_smb2_connection_handler(struct tevent_context *ev,
59 struct tevent_fd *fde,
62 static NTSTATUS smbd_smb2_flush_send_queue(struct smbXsrv_connection *xconn);
/*
 * Per-opcode dispatch table for SMB2 requests: flags such as
 * need_session / allow_invalid_fileid control pre-dispatch validation.
 * NOTE(review): this extract is missing lines (internal numbering has
 * gaps); entries below kept byte-identical to the source as seen.
 */
64 static const struct smbd_smb2_dispatch_table {
71 bool allow_invalid_fileid;
73 } smbd_smb2_table[] = {
74 #define _OP(o) .opcode = o, .name = #o
79 _OP(SMB2_OP_SESSSETUP),
/* Tree connect runs as root; make_connection_snum() switches user later. */
89 * This call needs to be run as root.
91 * smbd_smb2_request_process_tcon()
92 * calls make_connection_snum(), which will call
93 * change_to_user(), when needed.
103 .need_session = true,
107 .need_session = true,
112 .need_session = true,
117 .need_session = true,
122 .need_session = true,
128 .need_session = true,
133 .need_session = true,
136 .allow_invalid_fileid = true,
142 _OP(SMB2_OP_KEEPALIVE),
145 _OP(SMB2_OP_QUERY_DIRECTORY),
146 .need_session = true,
151 .need_session = true,
155 _OP(SMB2_OP_GETINFO),
156 .need_session = true,
160 _OP(SMB2_OP_SETINFO),
161 .need_session = true,
167 .need_session = true,
172 * as LEASE breaks does not
178 const char *smb2_opcode_name(uint16_t opcode)
180 if (opcode >= ARRAY_SIZE(smbd_smb2_table)) {
181 return "Bad SMB2 opcode";
183 return smbd_smb2_table[opcode].name;
186 static const struct smbd_smb2_dispatch_table *smbd_smb2_call(uint16_t opcode)
188 const struct smbd_smb2_dispatch_table *ret = NULL;
190 if (opcode >= ARRAY_SIZE(smbd_smb2_table)) {
194 ret = &smbd_smb2_table[opcode];
196 SMB_ASSERT(ret->opcode == opcode);
201 static void print_req_vectors(const struct smbd_smb2_request *req)
205 for (i = 0; i < req->in.vector_count; i++) {
206 dbgtext("\treq->in.vector[%u].iov_len = %u\n",
208 (unsigned int)req->in.vector[i].iov_len);
210 for (i = 0; i < req->out.vector_count; i++) {
211 dbgtext("\treq->out.vector[%u].iov_len = %u\n",
213 (unsigned int)req->out.vector[i].iov_len);
217 bool smbd_is_smb2_header(const uint8_t *inbuf, size_t size)
219 if (size < (4 + SMB2_HDR_BODY)) {
223 if (IVAL(inbuf, 4) != SMB2_MAGIC) {
230 bool smbd_smb2_is_compound(const struct smbd_smb2_request *req)
232 return req->in.vector_count >= (2*SMBD_SMB2_NUM_IOV_PER_REQ);
/*
 * Switch a connection into SMB2 mode: initialize the credit window
 * (seq_low/seq_range/granted/bitmap), re-register the transport fd with
 * the SMB2 connection handler, and optionally set up an io_uring send
 * path when the "smb2srv:send_uring" parameter is enabled.
 * NOTE(review): extract is missing lines; code kept byte-identical.
 */
235 static NTSTATUS smbd_initialize_smb2(struct smbXsrv_connection *xconn,
236 uint64_t expected_seq_low)
240 xconn->smb2.credits.seq_low = expected_seq_low;
241 xconn->smb2.credits.seq_range = 1;
242 xconn->smb2.credits.granted = 1;
243 xconn->smb2.credits.max = lp_smb2_max_credits();
244 xconn->smb2.credits.bitmap = bitmap_talloc(xconn,
245 xconn->smb2.credits.max);
246 if (xconn->smb2.credits.bitmap == NULL) {
247 return NT_STATUS_NO_MEMORY;
/* Drop the SMB1-era fde (and its close fn) before re-adding for SMB2. */
250 tevent_fd_set_close_fn(xconn->transport.fde, NULL);
251 TALLOC_FREE(xconn->transport.fde);
253 xconn->transport.fde = tevent_add_fd(
254 xconn->client->raw_ev_ctx,
256 xconn->transport.sock,
258 smbd_smb2_connection_handler,
260 if (xconn->transport.fde == NULL) {
261 close(xconn->transport.sock);
262 xconn->transport.sock = -1;
263 return NT_STATUS_NO_MEMORY;
265 tevent_fd_set_auto_close(xconn->transport.fde);
267 if (lp_parm_bool(-1, "smb2srv", "send_uring", false)) {
268 ret = samba_io_uring_create(xconn,
270 xconn->client->raw_ev_ctx,
271 &xconn->smb2.send_uring);
273 const struct samba_io_uring_features *features =
274 samba_io_uring_get_features(xconn->smb2.send_uring);
/* io_uring path requires sendmsg support; otherwise fall back. */
276 if (!features->op_sendmsg) {
277 TALLOC_FREE(xconn->smb2.send_uring);
281 if (xconn->smb2.send_uring != NULL) {
282 set_blocking(xconn->transport.sock, true);
/*
 * NOTE(review): the comment below says "non-blocking" but the call is
 * commented out, while the io_uring branch above sets blocking mode —
 * confirm which blocking mode the non-uring path actually expects.
 */
284 /* Ensure child is set to non-blocking mode */
285 //set_blocking(xconn->transport.sock, false);
/*
 * NBT length helpers: smb2_len() reads and _smb2_setlen() writes the
 * 24-bit big-endian length in bytes 1..3 of the 4-byte NBT header.
 * NOTE(review): extract is missing lines from this macro (likely the
 * buf[0] assignment and the do/while(0) close); kept byte-identical.
 */
289 #define smb2_len(buf) (PVAL(buf,3)|(PVAL(buf,2)<<8)|(PVAL(buf,1)<<16))
290 #define _smb2_setlen(_buf,len) do { \
291 uint8_t *buf = (uint8_t *)_buf; \
293 buf[1] = ((len)&0xFF0000)>>16; \
294 buf[2] = ((len)&0xFF00)>>8; \
295 buf[3] = (len)&0xFF; \
/*
 * Compute the total payload length of vector[1..count-1] and stamp it
 * into the NBT header at vector[0]. Fails when the length is unknown
 * (-1) or exceeds the 24-bit NBT maximum (0xFFFFFF).
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
298 static bool smb2_setup_nbt_length(struct iovec *vector, int count)
306 len = iov_buflen(vector+1, count-1);
308 if ((len == -1) || (len > 0xFFFFFF)) {
/*
 * NOTE(review): DBG_ERR on what appears to be the success path looks
 * like leftover debug output — consider removing or demoting to
 * DBG_DEBUG (upstream has no log call here).
 */
312 DBG_ERR("len[%zd]\n", len);
313 _smb2_setlen(vector[0].iov_base, len);
317 static int smbd_smb2_request_destructor(struct smbd_smb2_request *req)
319 if (req->first_key.length > 0) {
320 data_blob_clear_free(&req->first_key);
322 if (req->last_key.length > 0) {
323 data_blob_clear_free(&req->last_key);
328 void smb2_request_set_async_internal(struct smbd_smb2_request *req,
331 req->async_internal = async_internal;
/*
 * Allocate a zeroed smbd_smb2_request. Normally allocated on
 * talloc_tos() and reparented to mem_ctx; a separate talloc_init()
 * pool can be enabled (see comment) to make valgrind catch
 * use-after-free of request memory. Installs the destructor that
 * wipes key material.
 * NOTE(review): extract is missing lines (the #if/#else around the
 * debug pool and NULL checks); kept byte-identical.
 */
334 static struct smbd_smb2_request *smbd_smb2_request_allocate(TALLOC_CTX *mem_ctx)
336 TALLOC_CTX *mem_pool;
337 struct smbd_smb2_request *req;
340 /* Enable this to find subtle valgrind errors. */
341 mem_pool = talloc_init("smbd_smb2_request_allocate");
343 mem_pool = talloc_tos();
345 if (mem_pool == NULL) {
349 req = talloc_zero(mem_pool, struct smbd_smb2_request);
351 talloc_free(mem_pool);
354 talloc_reparent(mem_pool, mem_ctx, req);
356 TALLOC_FREE(mem_pool);
/* Sentinels: no session/tcon cached yet. */
359 req->last_session_id = UINT64_MAX;
360 req->last_tid = UINT32_MAX;
362 talloc_set_destructor(req, smbd_smb2_request_destructor);
/*
 * Parse a received buffer into per-PDU iovec groups (TF/HDR/BODY/DYN
 * per request), walking NextCommand offsets for compound requests and
 * transparently decrypting SMB2_TRANSFORM-wrapped PDUs in place.
 * On success returns the vector array and count to the caller.
 * NOTE(review): this extract is missing many lines (loop furniture,
 * several error returns); code kept byte-identical — too intricate to
 * reconstruct safely.
 */
367 static NTSTATUS smbd_smb2_inbuf_parse_compound(struct smbXsrv_connection *xconn,
371 struct smbd_smb2_request *req,
375 TALLOC_CTX *mem_ctx = req;
379 uint8_t *first_hdr = buf;
380 size_t verified_buflen = 0;
385 * Note: index '0' is reserved for the transport protocol
387 iov = req->in._vector;
389 while (taken < buflen) {
390 size_t len = buflen - taken;
391 uint8_t *hdr = first_hdr + taken;
394 size_t next_command_ofs;
396 uint8_t *body = NULL;
399 struct iovec *iov_alloc = NULL;
401 if (iov != req->in._vector) {
/* Data already validated by decryption does not need re-checking. */
405 if (verified_buflen > taken) {
406 len = verified_buflen - taken;
413 DEBUG(10, ("%d bytes left, expected at least %d\n",
/* Encrypted PDU: validate dialect/cipher, then decrypt in place. */
417 if (IVAL(hdr, 0) == SMB2_TF_MAGIC) {
418 struct smbXsrv_session *s = NULL;
420 struct iovec tf_iov[2];
424 if (xconn->protocol < PROTOCOL_SMB2_24) {
425 DEBUG(10, ("Got SMB2_TRANSFORM header, "
426 "but dialect[0x%04X] is used\n",
427 xconn->smb2.server.dialect));
431 if (xconn->smb2.server.cipher == 0) {
432 DEBUG(10, ("Got SMB2_TRANSFORM header, "
433 "but not negotiated "
434 "client[0x%08X] server[0x%08X]\n",
435 xconn->smb2.client.capabilities,
436 xconn->smb2.server.capabilities));
440 if (len < SMB2_TF_HDR_SIZE) {
441 DEBUG(1, ("%d bytes left, expected at least %d\n",
442 (int)len, SMB2_TF_HDR_SIZE));
446 tf_len = SMB2_TF_HDR_SIZE;
449 hdr = first_hdr + taken;
450 enc_len = IVAL(tf, SMB2_TF_MSG_SIZE);
451 uid = BVAL(tf, SMB2_TF_SESSION_ID);
453 if (len < SMB2_TF_HDR_SIZE + enc_len) {
454 DEBUG(1, ("%d bytes left, expected at least %d\n",
456 (int)(SMB2_TF_HDR_SIZE + enc_len)));
/* The decryption key lives on the session named in the TF header. */
460 status = smb2srv_session_lookup_conn(xconn, uid, now,
463 DEBUG(1, ("invalid session[%llu] in "
464 "SMB2_TRANSFORM header\n",
465 (unsigned long long)uid));
466 TALLOC_FREE(iov_alloc);
467 return NT_STATUS_USER_SESSION_DELETED;
470 tf_iov[0].iov_base = (void *)tf;
471 tf_iov[0].iov_len = tf_len;
472 tf_iov[1].iov_base = (void *)hdr;
473 tf_iov[1].iov_len = enc_len;
475 status = smb2_signing_decrypt_pdu(s->global->decryption_key,
476 xconn->smb2.server.cipher,
478 if (!NT_STATUS_IS_OK(status)) {
479 TALLOC_FREE(iov_alloc);
483 verified_buflen = taken + enc_len;
488 * We need the header plus the body length field
491 if (len < SMB2_HDR_BODY + 2) {
/* Developer backdoor, gated behind the "smbd:suicide mode" parameter. */
494 (IVAL(hdr, 0) == SMB_SUICIDE_PACKET) &&
495 lp_parm_bool(-1, "smbd", "suicide mode", false)) {
496 uint8_t exitcode = CVAL(hdr, 4);
497 DBG_WARNING("SUICIDE: Exiting immediately "
498 "with code %"PRIu8"\n",
503 DEBUG(10, ("%d bytes left, expected at least %d\n",
504 (int)len, SMB2_HDR_BODY));
507 if (IVAL(hdr, 0) != SMB2_MAGIC) {
508 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
512 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
513 DEBUG(10, ("Got HDR len %d, expected %d\n",
514 SVAL(hdr, 4), SMB2_HDR_BODY));
519 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
520 body_size = SVAL(hdr, SMB2_HDR_BODY);
522 if (next_command_ofs != 0) {
523 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
526 if (next_command_ofs > full_size) {
529 full_size = next_command_ofs;
/* Clamp an over-long body; the dispatcher reports the error. */
536 if (body_size > (full_size - SMB2_HDR_BODY)) {
538 * let the caller handle the error
540 body_size = full_size - SMB2_HDR_BODY;
542 body = hdr + SMB2_HDR_BODY;
543 dyn = body + body_size;
544 dyn_size = full_size - (SMB2_HDR_BODY + body_size);
/* Grow the vector array once the inline _vector is exhausted. */
546 if (num_iov >= ARRAY_SIZE(req->in._vector)) {
547 struct iovec *iov_tmp = NULL;
549 iov_tmp = talloc_realloc(mem_ctx, iov_alloc,
552 SMBD_SMB2_NUM_IOV_PER_REQ);
553 if (iov_tmp == NULL) {
554 TALLOC_FREE(iov_alloc);
555 return NT_STATUS_NO_MEMORY;
558 if (iov_alloc == NULL) {
561 sizeof(req->in._vector));
567 num_iov += SMBD_SMB2_NUM_IOV_PER_REQ;
569 cur[SMBD_SMB2_TF_IOV_OFS].iov_base = tf;
570 cur[SMBD_SMB2_TF_IOV_OFS].iov_len = tf_len;
571 cur[SMBD_SMB2_HDR_IOV_OFS].iov_base = hdr;
572 cur[SMBD_SMB2_HDR_IOV_OFS].iov_len = SMB2_HDR_BODY;
573 cur[SMBD_SMB2_BODY_IOV_OFS].iov_base = body;
574 cur[SMBD_SMB2_BODY_IOV_OFS].iov_len = body_size;
575 cur[SMBD_SMB2_DYN_IOV_OFS].iov_base = dyn;
576 cur[SMBD_SMB2_DYN_IOV_OFS].iov_len = dyn_size;
586 if (iov != req->in._vector) {
589 return NT_STATUS_INVALID_PARAMETER;
/*
 * Create the initial smbd_smb2_request from the first inbound PDU of a
 * connection. The first request must be an unencrypted, non-compound
 * SMB2 NEGPROT; anything else is rejected. The PDU is copied onto the
 * request and parsed into iovecs via smbd_smb2_inbuf_parse_compound().
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
592 static NTSTATUS smbd_smb2_request_create(struct smbXsrv_connection *xconn,
593 const uint8_t *_inpdu, size_t size,
594 struct smbd_smb2_request **_req)
596 struct smbd_server_connection *sconn = xconn->client->sconn;
597 struct smbd_smb2_request *req;
598 uint32_t protocol_version;
599 uint8_t *inpdu = NULL;
600 const uint8_t *inhdr = NULL;
602 uint32_t next_command_ofs;
606 if (size < (SMB2_HDR_BODY + 2)) {
607 DEBUG(0,("Invalid SMB2 packet length count %ld\n", (long)size));
608 return NT_STATUS_INVALID_PARAMETER;
613 protocol_version = IVAL(inhdr, SMB2_HDR_PROTOCOL_ID);
614 if (protocol_version != SMB2_MAGIC) {
615 DEBUG(0,("Invalid SMB packet: protocol prefix: 0x%08X\n",
617 return NT_STATUS_INVALID_PARAMETER;
/* The very first SMB2 request on a connection must be NEGPROT. */
620 cmd = SVAL(inhdr, SMB2_HDR_OPCODE);
621 if (cmd != SMB2_OP_NEGPROT) {
622 DEBUG(0,("Invalid SMB packet: first request: 0x%04X\n",
624 return NT_STATUS_INVALID_PARAMETER;
627 next_command_ofs = IVAL(inhdr, SMB2_HDR_NEXT_COMMAND);
628 if (next_command_ofs != 0) {
629 DEBUG(0,("Invalid SMB packet: next_command: 0x%08X\n",
631 return NT_STATUS_INVALID_PARAMETER;
634 req = smbd_smb2_request_allocate(xconn);
636 return NT_STATUS_NO_MEMORY;
641 inpdu = talloc_memdup(req, _inpdu, size);
643 return NT_STATUS_NO_MEMORY;
646 req->request_time = timeval_current();
647 now = timeval_to_nttime(&req->request_time);
649 status = smbd_smb2_inbuf_parse_compound(xconn,
653 req, &req->in.vector,
654 &req->in.vector_count);
655 if (!NT_STATUS_IS_OK(status)) {
/* Index 0 is the transport vector; PDUs start at index 1. */
660 req->current_idx = 1;
/*
 * Validate one sequence id against the credit window
 * [seq_low, seq_low + seq_range) and mark it used in the bitmap.
 * Rejects ids below the window, beyond the granted range, or already
 * seen (replay). When the lowest id is consumed, the window slides
 * forward over every contiguously-used id.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
666 static bool smb2_validate_sequence_number(struct smbXsrv_connection *xconn,
667 uint64_t message_id, uint64_t seq_id)
669 struct bitmap *credits_bm = xconn->smb2.credits.bitmap;
673 seq_tmp = xconn->smb2.credits.seq_low;
674 if (seq_id < seq_tmp) {
675 DBGC_ERR(DBGC_SMB2_CREDITS,
676 "smb2_validate_sequence_number: bad message_id "
677 "%llu (sequence id %llu) "
678 "(granted = %u, low = %llu, range = %u)\n",
679 (unsigned long long)message_id,
680 (unsigned long long)seq_id,
681 (unsigned int)xconn->smb2.credits.granted,
682 (unsigned long long)xconn->smb2.credits.seq_low,
683 (unsigned int)xconn->smb2.credits.seq_range);
687 seq_tmp += xconn->smb2.credits.seq_range;
688 if (seq_id >= seq_tmp) {
689 DBGC_ERR(DBGC_SMB2_CREDITS,
690 "smb2_validate_sequence_number: bad message_id "
691 "%llu (sequence id %llu) "
692 "(granted = %u, low = %llu, range = %u)\n",
693 (unsigned long long)message_id,
694 (unsigned long long)seq_id,
695 (unsigned int)xconn->smb2.credits.granted,
696 (unsigned long long)xconn->smb2.credits.seq_low,
697 (unsigned int)xconn->smb2.credits.seq_range);
/* The bitmap is indexed modulo the maximum credit count. */
701 offset = seq_id % xconn->smb2.credits.max;
703 if (bitmap_query(credits_bm, offset)) {
704 DBGC_ERR(DBGC_SMB2_CREDITS,
705 "smb2_validate_sequence_number: duplicate message_id "
706 "%llu (sequence id %llu) "
707 "(granted = %u, low = %llu, range = %u) "
709 (unsigned long long)message_id,
710 (unsigned long long)seq_id,
711 (unsigned int)xconn->smb2.credits.granted,
712 (unsigned long long)xconn->smb2.credits.seq_low,
713 (unsigned int)xconn->smb2.credits.seq_range,
718 /* Mark the message_ids as seen in the bitmap. */
719 bitmap_set(credits_bm, offset);
721 if (seq_id != xconn->smb2.credits.seq_low) {
726 * Move the window forward by all the message_id's
729 while (bitmap_query(credits_bm, offset)) {
730 DBGC_DEBUG(DBGC_SMB2_CREDITS,
731 "smb2_validate_sequence_number: clearing "
732 "id %llu (position %u) from bitmap\n",
733 (unsigned long long)(xconn->smb2.credits.seq_low),
735 bitmap_clear(credits_bm, offset);
737 xconn->smb2.credits.seq_low += 1;
738 xconn->smb2.credits.seq_range -= 1;
739 offset = xconn->smb2.credits.seq_low % xconn->smb2.credits.max;
/*
 * Validate an inbound message id against granted credits. CANCEL is
 * exempt (it reuses ids by design). A multi-credit request with charge
 * N implicitly consumes ids [mid, mid+N-1]; each is validated and
 * marked via smb2_validate_sequence_number(), then the charge is
 * deducted from the granted credit count.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
745 static bool smb2_validate_message_id(struct smbXsrv_connection *xconn,
746 const uint8_t *inhdr)
748 uint64_t message_id = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
749 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
750 uint16_t credit_charge = 1;
753 if (opcode == SMB2_OP_CANCEL) {
754 /* SMB2_CANCEL requests by definition resend messageids. */
758 if (xconn->smb2.credits.multicredit) {
759 credit_charge = SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE);
760 credit_charge = MAX(credit_charge, 1);
765 ("smb2_validate_message_id: mid %llu (charge %llu), "
766 "credits_granted %llu, "
767 "seqnum low/range: %llu/%llu\n",
768 (unsigned long long) message_id,
769 (unsigned long long) credit_charge,
770 (unsigned long long) xconn->smb2.credits.granted,
771 (unsigned long long) xconn->smb2.credits.seq_low,
772 (unsigned long long) xconn->smb2.credits.seq_range));
774 if (xconn->smb2.credits.granted < credit_charge) {
775 DBGC_ERR(DBGC_SMB2_CREDITS,
776 "smb2_validate_message_id: client used more "
777 "credits than granted, mid %llu, charge %llu, "
778 "credits_granted %llu, "
779 "seqnum low/range: %llu/%llu\n",
780 (unsigned long long) message_id,
781 (unsigned long long) credit_charge,
782 (unsigned long long) xconn->smb2.credits.granted,
783 (unsigned long long) xconn->smb2.credits.seq_low,
784 (unsigned long long) xconn->smb2.credits.seq_range);
789 * now check the message ids
791 * for multi-credit requests we need to check all current mid plus
792 * the implicit mids caused by the credit charge
793 * e.g. current mid = 15, charge 5 => mark 15-19 as used
796 for (i = 0; i <= (credit_charge-1); i++) {
797 uint64_t id = message_id + i;
802 ("Iterating mid %llu charge %u (sequence %llu)\n",
803 (unsigned long long)message_id,
805 (unsigned long long)id));
807 ok = smb2_validate_sequence_number(xconn, message_id, id);
813 /* subtract used credits */
814 xconn->smb2.credits.granted -= credit_charge;
/*
 * Structural validation of a parsed (possibly compound) request: each
 * PDU must have a full-size SMB2 header, a body of at least 2 bytes,
 * the SMB2 magic, and a valid message id / credit charge.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
819 static NTSTATUS smbd_smb2_request_validate(struct smbd_smb2_request *req)
824 count = req->in.vector_count;
826 if (count < 1 + SMBD_SMB2_NUM_IOV_PER_REQ) {
827 /* It's not a SMB2 request */
828 return NT_STATUS_INVALID_PARAMETER;
/* Walk one iovec group (TF/HDR/BODY/DYN) per PDU, skipping index 0. */
831 for (idx=1; idx < count; idx += SMBD_SMB2_NUM_IOV_PER_REQ) {
832 struct iovec *hdr = SMBD_SMB2_IDX_HDR_IOV(req,in,idx);
833 struct iovec *body = SMBD_SMB2_IDX_BODY_IOV(req,in,idx);
834 const uint8_t *inhdr = NULL;
836 if (hdr->iov_len != SMB2_HDR_BODY) {
837 return NT_STATUS_INVALID_PARAMETER;
840 if (body->iov_len < 2) {
841 return NT_STATUS_INVALID_PARAMETER;
844 inhdr = (const uint8_t *)hdr->iov_base;
846 /* Check the SMB2 header */
847 if (IVAL(inhdr, SMB2_HDR_PROTOCOL_ID) != SMB2_MAGIC) {
848 return NT_STATUS_INVALID_PARAMETER;
851 if (!smb2_validate_message_id(req->xconn, inhdr)) {
852 return NT_STATUS_INVALID_PARAMETER;
/*
 * Decide how many credits to grant in one response header: honor the
 * charge, grant extra credits per policy (all requested, like Windows
 * Server 2016), clamp so sequence numbers cannot wrap or exceed the
 * configured maximum, then write SMB2_HDR_CREDIT and update the
 * connection's granted/seq_range accounting.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
859 static void smb2_set_operation_credit(struct smbXsrv_connection *xconn,
860 const struct iovec *in_vector,
861 struct iovec *out_vector)
863 const uint8_t *inhdr = (const uint8_t *)in_vector->iov_base;
864 uint8_t *outhdr = (uint8_t *)out_vector->iov_base;
865 uint16_t credit_charge = 1;
866 uint16_t credits_requested;
870 uint16_t credits_granted = 0;
871 uint64_t credits_possible;
872 uint16_t current_max_credits;
875 * first we grant only 1/16th of the max range.
877 * Windows also starts with the 1/16th and then grants
878 * more later. I was only able to trigger higher
879 * values, when using a very high credit charge.
881 * TODO: scale up depending on load, free memory
883 * Maybe also on the relationship between number
884 * of requests and the used sequence number.
885 * Which means we would grant more credits
886 * for client which use multi credit requests.
888 * The above is what Windows Server < 2016 is doing,
889 * but new servers use all credits (8192 by default).
891 current_max_credits = xconn->smb2.credits.max;
892 current_max_credits = MAX(current_max_credits, 1);
894 if (xconn->smb2.credits.multicredit) {
895 credit_charge = SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE);
896 credit_charge = MAX(credit_charge, 1);
899 cmd = SVAL(inhdr, SMB2_HDR_OPCODE);
900 credits_requested = SVAL(inhdr, SMB2_HDR_CREDIT);
901 credits_requested = MAX(credits_requested, 1);
902 out_flags = IVAL(outhdr, SMB2_HDR_FLAGS);
903 out_status = NT_STATUS(IVAL(outhdr, SMB2_HDR_STATUS));
905 SMB_ASSERT(xconn->smb2.credits.max >= xconn->smb2.credits.granted);
907 if (xconn->smb2.credits.max < credit_charge) {
908 smbd_server_connection_terminate(xconn,
909 "client error: credit charge > max credits\n");
913 if (out_flags & SMB2_HDR_FLAG_ASYNC) {
915 * In case we already send an async interim
916 * response, we should not grant
917 * credits on the final response.
921 uint16_t additional_possible =
922 xconn->smb2.credits.max - credit_charge;
923 uint16_t additional_max = 0;
924 uint16_t additional_credits = credits_requested - 1;
927 case SMB2_OP_NEGPROT:
929 case SMB2_OP_SESSSETUP:
931 * Windows 2012 RC1 starts to grant
933 * with a successful session setup
935 if (NT_STATUS_IS_OK(out_status)) {
936 additional_max = xconn->smb2.credits.max;
941 * Windows Server < 2016 and older Samba versions
942 * used to only grant additional credits in
943 * chunks of 32 credits.
945 * But we match Windows Server 2016 and grant
946 * all credits as requested.
948 additional_max = xconn->smb2.credits.max;
952 additional_max = MIN(additional_max, additional_possible);
953 additional_credits = MIN(additional_credits, additional_max);
955 credits_granted = credit_charge + additional_credits;
959 * sequence numbers should not wrap
961 * 1. calculate the possible credits until
962 * the sequence numbers start to wrap on 64-bit.
964 * 2. UINT64_MAX is used for Break Notifications.
966 * 2. truncate the possible credits to the maximum
967 * credits we want to grant to the client in total.
969 * 3. remove the range we'll already granted to the client
970 * this makes sure the client consumes the lowest sequence
971 * number, before we can grant additional credits.
973 credits_possible = UINT64_MAX - xconn->smb2.credits.seq_low;
974 if (credits_possible > 0) {
975 /* remove UINT64_MAX */
976 credits_possible -= 1;
978 credits_possible = MIN(credits_possible, current_max_credits);
979 credits_possible -= xconn->smb2.credits.seq_range;
981 credits_granted = MIN(credits_granted, credits_possible);
983 SSVAL(outhdr, SMB2_HDR_CREDIT, credits_granted);
984 xconn->smb2.credits.granted += credits_granted;
985 xconn->smb2.credits.seq_range += credits_granted;
987 DBGC_DEBUG(DBGC_SMB2_CREDITS,
988 "smb2_set_operation_credit: requested %u, charge %u, "
989 "granted %u, current possible/max %u/%u, "
990 "total granted/max/low/range %u/%u/%llu/%u\n",
991 (unsigned int)credits_requested,
992 (unsigned int)credit_charge,
993 (unsigned int)credits_granted,
994 (unsigned int)credits_possible,
995 (unsigned int)current_max_credits,
996 (unsigned int)xconn->smb2.credits.granted,
997 (unsigned int)xconn->smb2.credits.max,
998 (unsigned long long)xconn->smb2.credits.seq_low,
999 (unsigned int)xconn->smb2.credits.seq_range);
/*
 * For a compound response, grant credits per PDU, then (matching
 * Windows) zero the credit field in every reply but the last and put
 * the summed total in the final one.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
1002 static void smb2_calculate_credits(const struct smbd_smb2_request *inreq,
1003 struct smbd_smb2_request *outreq)
1006 uint16_t total_credits = 0;
1008 count = outreq->out.vector_count;
1010 for (idx=1; idx < count; idx += SMBD_SMB2_NUM_IOV_PER_REQ) {
1011 struct iovec *inhdr_v = SMBD_SMB2_IDX_HDR_IOV(inreq,in,idx);
1012 struct iovec *outhdr_v = SMBD_SMB2_IDX_HDR_IOV(outreq,out,idx);
1013 uint8_t *outhdr = (uint8_t *)outhdr_v->iov_base;
1015 smb2_set_operation_credit(outreq->xconn, inhdr_v, outhdr_v);
1017 /* To match Windows, count up what we
1019 total_credits += SVAL(outhdr, SMB2_HDR_CREDIT);
1020 /* Set to zero in all but the last reply. */
1021 if (idx + SMBD_SMB2_NUM_IOV_PER_REQ < count) {
1022 SSVAL(outhdr, SMB2_HDR_CREDIT, 0);
1024 SSVAL(outhdr, SMB2_HDR_CREDIT, total_credits);
1029 DATA_BLOB smbd_smb2_generate_outbody(struct smbd_smb2_request *req, size_t size)
1031 if (req->current_idx <= 1) {
1032 if (size <= sizeof(req->out._body)) {
1033 return data_blob_const(req->out._body, size);
1037 return data_blob_talloc(req, NULL, size);
/*
 * Build the output vector array for a request: one NBT transport
 * vector plus, per inbound PDU, a TF/HDR/BODY/DYN group pre-filled
 * with an SMB2 header (fields copied from the request) and an 8-byte
 * error body, so an error reply is always ready. Finally stamps the
 * NBT length and queues the request on the connection.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
1040 static NTSTATUS smbd_smb2_request_setup_out(struct smbd_smb2_request *req)
1042 struct smbXsrv_connection *xconn = req->xconn;
1043 TALLOC_CTX *mem_ctx;
1044 struct iovec *vector;
1049 count = req->in.vector_count;
/* Prefer the inline _vector; fall back to a talloc'ed array. */
1050 if (count <= ARRAY_SIZE(req->out._vector)) {
1052 vector = req->out._vector;
1054 vector = talloc_zero_array(req, struct iovec, count);
1055 if (vector == NULL) {
1056 return NT_STATUS_NO_MEMORY;
1061 vector[0].iov_base = req->out.nbt_hdr;
1062 vector[0].iov_len = 4;
1063 SIVAL(req->out.nbt_hdr, 0, 0);
1065 for (idx=1; idx < count; idx += SMBD_SMB2_NUM_IOV_PER_REQ) {
1066 struct iovec *inhdr_v = SMBD_SMB2_IDX_HDR_IOV(req,in,idx);
1067 const uint8_t *inhdr = (const uint8_t *)inhdr_v->iov_base;
1068 uint8_t *outhdr = NULL;
1069 uint8_t *outbody = NULL;
1070 uint32_t next_command_ofs = 0;
1071 struct iovec *current = &vector[idx];
1073 if ((idx + SMBD_SMB2_NUM_IOV_PER_REQ) < count) {
1074 /* we have a next command -
1075 * setup for the error case. */
1076 next_command_ofs = SMB2_HDR_BODY + 9;
1080 outhdr = req->out._hdr;
1082 outhdr = talloc_zero_array(mem_ctx, uint8_t,
1084 if (outhdr == NULL) {
1085 return NT_STATUS_NO_MEMORY;
1089 outbody = outhdr + SMB2_HDR_BODY;
1092 * SMBD_SMB2_TF_IOV_OFS might be used later
1094 current[SMBD_SMB2_TF_IOV_OFS].iov_base = NULL;
1095 current[SMBD_SMB2_TF_IOV_OFS].iov_len = 0;
1097 current[SMBD_SMB2_HDR_IOV_OFS].iov_base = (void *)outhdr;
1098 current[SMBD_SMB2_HDR_IOV_OFS].iov_len = SMB2_HDR_BODY;
1100 current[SMBD_SMB2_BODY_IOV_OFS].iov_base = (void *)outbody;
1101 current[SMBD_SMB2_BODY_IOV_OFS].iov_len = 8;
1103 current[SMBD_SMB2_DYN_IOV_OFS].iov_base = NULL;
1104 current[SMBD_SMB2_DYN_IOV_OFS].iov_len = 0;
1106 /* setup the SMB2 header */
1107 SIVAL(outhdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
1108 SSVAL(outhdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
1109 SSVAL(outhdr, SMB2_HDR_CREDIT_CHARGE,
1110 SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE));
1111 SIVAL(outhdr, SMB2_HDR_STATUS,
1112 NT_STATUS_V(NT_STATUS_INTERNAL_ERROR));
1113 SSVAL(outhdr, SMB2_HDR_OPCODE,
1114 SVAL(inhdr, SMB2_HDR_OPCODE));
1115 SIVAL(outhdr, SMB2_HDR_FLAGS,
1116 IVAL(inhdr, SMB2_HDR_FLAGS) | SMB2_HDR_FLAG_REDIRECT);
1117 SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, next_command_ofs);
1118 SBVAL(outhdr, SMB2_HDR_MESSAGE_ID,
1119 BVAL(inhdr, SMB2_HDR_MESSAGE_ID));
1120 SIVAL(outhdr, SMB2_HDR_PID,
1121 IVAL(inhdr, SMB2_HDR_PID));
1122 SIVAL(outhdr, SMB2_HDR_TID,
1123 IVAL(inhdr, SMB2_HDR_TID));
1124 SBVAL(outhdr, SMB2_HDR_SESSION_ID,
1125 BVAL(inhdr, SMB2_HDR_SESSION_ID));
1126 memcpy(outhdr + SMB2_HDR_SIGNATURE,
1127 inhdr + SMB2_HDR_SIGNATURE, 16);
1129 /* setup error body header */
1130 SSVAL(outbody, 0x00, 0x08 + 1);
1131 SSVAL(outbody, 0x02, 0);
1132 SIVAL(outbody, 0x04, 0);
1135 req->out.vector = vector;
1136 req->out.vector_count = count;
1138 /* setup the length of the NBT packet */
1139 ok = smb2_setup_nbt_length(req->out.vector, req->out.vector_count);
1141 return NT_STATUS_INVALID_PARAMETER_MIX;
1144 DLIST_ADD_END(xconn->smb2.requests, req);
1146 return NT_STATUS_OK;
/*
 * Report whether multi-channel should be offered. Requires kernel
 * support for querying un-acked bytes in the socket send queue
 * (__ALLOW_MULTI_CHANNEL_SUPPORT); without it, the option is refused
 * unless explicitly forced, and a notice is logged.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
1149 bool smbXsrv_server_multi_channel_enabled(void)
1151 bool enabled = lp_server_multi_channel_support();
1152 #ifndef __ALLOW_MULTI_CHANNEL_SUPPORT
1153 bool forced = false;
1155 * If we don't have support from the kernel
1156 * to ask for the un-acked number of bytes
1157 * in the socket send queue, we better
1158 * don't support multi-channel.
1160 forced = lp_parm_bool(-1, "force", "server multi channel support", false);
1161 if (enabled && !forced) {
1162 D_NOTICE("'server multi channel support' enabled "
1163 "but not supported on %s (%s)\n",
1164 SYSTEM_UNAME_SYSNAME, SYSTEM_UNAME_RELEASE);
1165 DEBUGADD(DBGLVL_NOTICE, ("Please report this on "
1166 "https://bugzilla.samba.org/show_bug.cgi?id=11897\n"));
1169 #endif /* ! __ALLOW_MULTI_CHANNEL_SUPPORT */
/*
 * Fetch the kernel's current TCP retransmission timeout for the
 * connection via getsockopt(TCP_INFO) where available, defaulting to
 * 1s otherwise, and clamp the result to [0.2s, 1.0s].
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
1173 static NTSTATUS smbXsrv_connection_get_rto_usecs(struct smbXsrv_connection *xconn,
1174 uint32_t *_rto_usecs)
1177 * Define an Retransmission Timeout
1178 * of 1 second, if there's no way for the
1179 * kernel to tell us the current value.
1181 uint32_t rto_usecs = 1000000;
1183 #ifdef __HAVE_TCP_INFO_RTO
1185 struct tcp_info info;
1186 socklen_t ilen = sizeof(info);
1190 ret = getsockopt(xconn->transport.sock,
1191 IPPROTO_TCP, TCP_INFO,
1192 (void *)&info, &ilen);
/* errno is saved before the next libc call can clobber it. */
1194 int saved_errno = errno;
1195 NTSTATUS status = map_nt_error_from_unix(errno);
1196 DBG_ERR("getsockopt(TCP_INFO) errno[%d/%s] -s %s\n",
1197 saved_errno, strerror(saved_errno),
1202 DBG_DEBUG("tcpi_rto[%u] tcpi_rtt[%u] tcpi_rttvar[%u]\n",
1203 (unsigned)info.tcpi_rto,
1204 (unsigned)info.tcpi_rtt,
1205 (unsigned)info.tcpi_rttvar);
1206 rto_usecs = info.tcpi_rto;
1208 #endif /* __HAVE_TCP_INFO_RTO */
1210 rto_usecs = MAX(rto_usecs, 200000); /* at least 0.2s */
1211 rto_usecs = MIN(rto_usecs, 1000000); /* at max 1.0s */
1212 *_rto_usecs = rto_usecs;
1213 return NT_STATUS_OK;
/*
 * Compute how many bytes the peer has acked since the last call, by
 * querying the kernel's socket send-queue size (SIOCOUTQ/FIONWRITE)
 * and diffing against the cached unacked_bytes. The smbtorture
 * force_unacked_timeout hook simulates a dead channel by reporting
 * no progress.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
1216 static NTSTATUS smbXsrv_connection_get_acked_bytes(struct smbXsrv_connection *xconn,
1217 uint64_t *_acked_bytes)
1220 * Unless the kernel has an interface
1221 * to reveal the number of un-acked bytes
1222 * in the socket send queue, we'll assume
1223 * everything is already acked.
1225 * But that would mean that we better don't
1226 * pretend to support multi-channel.
1228 uint64_t unacked_bytes = 0;
1232 if (xconn->ack.force_unacked_timeout) {
1234 * Smbtorture tries to test channel failures...
1235 * Just pretend nothing was acked...
1237 DBG_INFO("Simulating channel failure: "
1238 "xconn->ack.unacked_bytes[%llu]\n",
1239 (unsigned long long)xconn->ack.unacked_bytes);
1240 return NT_STATUS_OK;
1243 #ifdef __IOCTL_SEND_QUEUE_SIZE_OPCODE
1249 * If we have kernel support to get
1250 * the number of bytes waiting in
1251 * the socket's send queue, we
1252 * use that in order to find out
1253 * the number of unacked bytes.
1255 ret = ioctl(xconn->transport.sock,
1256 __IOCTL_SEND_QUEUE_SIZE_OPCODE,
1259 int saved_errno = errno;
1260 NTSTATUS status = map_nt_error_from_unix(saved_errno);
1261 DBG_ERR("Failed to get the SEND_QUEUE_SIZE - "
1262 "errno %d (%s) - %s\n",
1263 saved_errno, strerror(saved_errno),
/* A negative queue size from the kernel would be nonsense. */
1269 DBG_ERR("xconn->ack.unacked_bytes[%llu] value[%d]\n",
1270 (unsigned long long)xconn->ack.unacked_bytes,
1272 return NT_STATUS_INTERNAL_ERROR;
1274 unacked_bytes = value;
/* First sample of a cycle only establishes the baseline. */
1277 if (xconn->ack.unacked_bytes == 0) {
1278 xconn->ack.unacked_bytes = unacked_bytes;
1279 return NT_STATUS_OK;
/* The send queue must not grow between samples without new sends. */
1282 if (xconn->ack.unacked_bytes < unacked_bytes) {
1283 DBG_ERR("xconn->ack.unacked_bytes[%llu] unacked_bytes[%llu]\n",
1284 (unsigned long long)xconn->ack.unacked_bytes,
1285 (unsigned long long)unacked_bytes);
1286 return NT_STATUS_INTERNAL_ERROR;
1289 *_acked_bytes = xconn->ack.unacked_bytes - unacked_bytes;
1290 xconn->ack.unacked_bytes = unacked_bytes;
1291 return NT_STATUS_OK;
1294 static void smbd_smb2_send_queue_ack_fail(struct smbd_smb2_send_queue **queue,
1297 struct smbd_smb2_send_queue *e = NULL;
1298 struct smbd_smb2_send_queue *n = NULL;
1300 for (e = *queue; e != NULL; e = n) {
1303 DLIST_REMOVE(*queue, e);
1304 if (e->ack.req != NULL) {
1305 tevent_req_nterror(e->ack.req, status);
1310 static NTSTATUS smbd_smb2_send_queue_ack_bytes(struct smbd_smb2_send_queue **queue,
1311 uint64_t acked_bytes)
1313 struct smbd_smb2_send_queue *e = NULL;
1314 struct smbd_smb2_send_queue *n = NULL;
1316 for (e = *queue; e != NULL; e = n) {
1321 if (e->ack.req == NULL) {
1325 if (e->ack.required_acked_bytes <= acked_bytes) {
1326 e->ack.required_acked_bytes = 0;
1327 DLIST_REMOVE(*queue, e);
1328 tevent_req_done(e->ack.req);
1331 e->ack.required_acked_bytes -= acked_bytes;
1333 expired = timeval_expired(&e->ack.timeout);
1335 return NT_STATUS_IO_TIMEOUT;
1339 return NT_STATUS_OK;
1342 static NTSTATUS smbd_smb2_check_ack_queue(struct smbXsrv_connection *xconn)
1344 uint64_t acked_bytes = 0;
1347 status = smbXsrv_connection_get_acked_bytes(xconn, &acked_bytes);
1348 if (!NT_STATUS_IS_OK(status)) {
1352 status = smbd_smb2_send_queue_ack_bytes(&xconn->ack.queue, acked_bytes);
1353 if (!NT_STATUS_IS_OK(status)) {
1357 status = smbd_smb2_send_queue_ack_bytes(&xconn->smb2.send_queue, 0);
1358 if (!NT_STATUS_IS_OK(status)) {
1362 return NT_STATUS_OK;
/*
 * Wakeup-timer callback of the ack-checker cycle: run the ack-queue
 * check and, if the connection is still healthy, re-arm the wakeup
 * for one retransmission timeout later. Any failure terminates the
 * connection.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
1365 static void smbXsrv_connection_ack_checker(struct tevent_req *subreq)
1367 struct smbXsrv_connection *xconn =
1368 tevent_req_callback_data(subreq,
1369 struct smbXsrv_connection);
1370 struct smbXsrv_client *client = xconn->client;
1371 struct timeval next_check;
1375 xconn->ack.checker_subreq = NULL;
1377 ok = tevent_wakeup_recv(subreq);
1378 TALLOC_FREE(subreq);
1380 smbd_server_connection_terminate(xconn,
1381 "tevent_wakeup_recv() failed");
1385 status = smbd_smb2_check_ack_queue(xconn);
1386 if (!NT_STATUS_IS_OK(status)) {
1387 smbd_server_connection_terminate(xconn, nt_errstr(status));
/* Re-arm the checker one RTO in the future. */
1391 next_check = timeval_current_ofs_usec(xconn->ack.rto_usecs);
1392 xconn->ack.checker_subreq = tevent_wakeup_send(xconn,
1395 if (xconn->ack.checker_subreq == NULL) {
1396 smbd_server_connection_terminate(xconn,
1397 "tevent_wakeup_send() failed");
1400 tevent_req_set_callback(xconn->ack.checker_subreq,
1401 smbXsrv_connection_ack_checker,
/*
 * Start or stop the per-connection ack-checker cycle as the client's
 * set of pending oplock/lease breaks changes: with no pending breaks
 * the checker is cancelled; otherwise (if not already running) the
 * RTO is sampled, the unacked-bytes baseline reset, and a wakeup timer
 * armed for one RTO.
 * NOTE(review): extract is missing lines; kept byte-identical.
 */
1405 static NTSTATUS smbXsrv_client_pending_breaks_updated(struct smbXsrv_client *client)
1407 struct smbXsrv_connection *xconn = NULL;
1409 for (xconn = client->connections; xconn != NULL; xconn = xconn->next) {
1410 struct timeval next_check;
1411 uint64_t acked_bytes = 0;
1415 * A new 'pending break cycle' starts
1416 * with a first pending break and lasts until
1417 * all pending breaks are finished.
1419 * This is typically a very short time,
1420 * the value of one retransmission timeout.
1423 if (client->pending_breaks == NULL) {
1425 * No more pending breaks, remove a pending
1428 TALLOC_FREE(xconn->ack.checker_subreq);
1432 if (xconn->ack.checker_subreq != NULL) {
1434 * The cycle already started =>
1441 * Get the current retransmission timeout value.
1443 * It may change over time, but fetching it once
1444 * per 'pending break' cycled should be enough.
1446 status = smbXsrv_connection_get_rto_usecs(xconn,
1447 &xconn->ack.rto_usecs);
1448 if (!NT_STATUS_IS_OK(status)) {
1453 * At the start of the cycle we reset the
1454 * unacked_bytes counter (first to 0 and
1455 * within smbXsrv_connection_get_acked_bytes()
1456 * to the current value in the kernel
1459 xconn->ack.unacked_bytes = 0;
1460 status = smbXsrv_connection_get_acked_bytes(xconn, &acked_bytes);
1461 if (!NT_STATUS_IS_OK(status)) {
1466 * We setup a timer in order to check for
1467 * acked bytes after one retransmission timeout.
1469 * The code that sets up the send_queue.ack.timeout
1470 * uses a multiple of the retransmission timeout.
1472 next_check = timeval_current_ofs_usec(xconn->ack.rto_usecs);
1473 xconn->ack.checker_subreq = tevent_wakeup_send(xconn,
1476 if (xconn->ack.checker_subreq == NULL) {
1477 return NT_STATUS_NO_MEMORY;
1479 tevent_req_set_callback(xconn->ack.checker_subreq,
1480 smbXsrv_connection_ack_checker,
1484 return NT_STATUS_OK;
/*
 * Mark the transport of @xconn as dead with @status (first caller wins;
 * a later call with the status already set is a no-op), drop the fd
 * event and socket, and fail all entries on both send queues.
 */
1487 void smbXsrv_connection_disconnect_transport(struct smbXsrv_connection *xconn,
/* Already disconnected — keep the first failure status. */
1490 if (!NT_STATUS_IS_OK(xconn->transport.status)) {
1494 xconn->transport.status = status;
1495 TALLOC_FREE(xconn->transport.fde);
1496 if (xconn->transport.sock != -1) {
1497 xconn->transport.sock = -1;
/* Fail everything still queued; nothing can be sent any more. */
1499 smbd_smb2_send_queue_ack_fail(&xconn->ack.queue, status);
1500 smbd_smb2_send_queue_ack_fail(&xconn->smb2.send_queue, status);
1501 xconn->smb2.send_queue_len = 0;
1502 DO_PROFILE_INC(disconnect);
/*
 * Count the client's connections whose transport status is still OK,
 * i.e. connections that have not been disconnected yet.
 */
1505 size_t smbXsrv_client_valid_connections(struct smbXsrv_client *client)
1507 struct smbXsrv_connection *xconn = NULL;
1510 for (xconn = client->connections; xconn != NULL; xconn = xconn->next) {
1511 if (NT_STATUS_IS_OK(xconn->transport.status)) {
/*
 * Per-request state for the connection-shutdown tevent request:
 * wait_queue blocks until all in-flight SMB2 requests on the
 * connection have been torn down.
 */
1519 struct smbXsrv_connection_shutdown_state {
1520 struct tevent_queue *wait_queue;
1521 struct smbXsrv_connection *xconn;
1524 static void smbXsrv_connection_shutdown_wait_done(struct tevent_req *subreq);
/*
 * Start an async shutdown of @xconn: disconnect all sessions from the
 * connection, then wait (via a tevent_queue) until every pending SMB2
 * request on the connection has been freed.  The transport must have
 * been disconnected and terminating set by the caller beforehand.
 */
1526 static struct tevent_req *smbXsrv_connection_shutdown_send(TALLOC_CTX *mem_ctx,
1527 struct tevent_context *ev,
1528 struct smbXsrv_connection *xconn)
1530 struct tevent_req *req = NULL;
1531 struct smbXsrv_connection_shutdown_state *state = NULL;
1532 struct tevent_req *subreq = NULL;
1534 struct smbd_smb2_request *preq = NULL;
1538 * The caller should have called
1539 * smbXsrv_connection_disconnect_transport() before.
1541 SMB_ASSERT(!NT_STATUS_IS_OK(xconn->transport.status));
1542 SMB_ASSERT(xconn->transport.terminating);
1544 req = tevent_req_create(mem_ctx, &state,
1545 struct smbXsrv_connection_shutdown_state);
1550 state->xconn = xconn;
1551 tevent_req_defer_callback(req, ev);
1553 status = smbXsrv_session_disconnect_xconn(xconn);
1554 if (tevent_req_nterror(req, status)) {
1555 return tevent_req_post(req, ev);
1558 state->wait_queue = tevent_queue_create(state, "smbXsrv_connection_shutdown_queue");
1559 if (tevent_req_nomem(state->wait_queue, req)) {
1560 return tevent_req_post(req, ev);
/* Strip per-request crypto/session state and cancel each subreq. */
1563 for (preq = xconn->smb2.requests; preq != NULL; preq = preq->next) {
1565 * The connection is gone so we
1566 * don't need to take care of
1569 preq->session = NULL;
1570 preq->do_signing = false;
1571 preq->do_encryption = false;
1572 preq->preauth = NULL;
1574 if (preq->subreq != NULL) {
1575 tevent_req_cancel(preq->subreq);
1579 * Now wait until the request is finished.
1581 * We don't set a callback, as we just want to block the
1582 * wait queue and the talloc_free() of the request will
1583 * remove the item from the wait queue.
1585 subreq = tevent_queue_wait_send(preq, ev, state->wait_queue);
1586 if (tevent_req_nomem(subreq, req)) {
1587 return tevent_req_post(req, ev);
/* Nothing pending — we are done immediately. */
1591 len = tevent_queue_length(state->wait_queue);
1593 tevent_req_done(req);
1594 return tevent_req_post(req, ev);
1598 * Now we add our own waiter to the end of the queue,
1599 * this way we get notified when all pending requests are finished
1600 * and sent to the socket.
1602 subreq = tevent_queue_wait_send(state, ev, state->wait_queue);
1603 if (tevent_req_nomem(subreq, req)) {
1604 return tevent_req_post(req, ev);
1606 tevent_req_set_callback(subreq, smbXsrv_connection_shutdown_wait_done, req);
/*
 * Completion callback: the shutdown wait queue drained, so all pending
 * requests on the connection are gone.  Marks the request done and
 * asserts the deferred-callback guarantee still holds.
 */
1611 static void smbXsrv_connection_shutdown_wait_done(struct tevent_req *subreq)
1613 struct tevent_req *req =
1614 tevent_req_callback_data(subreq,
1616 struct smbXsrv_connection_shutdown_state *state =
1617 tevent_req_data(req,
1618 struct smbXsrv_connection_shutdown_state);
1619 struct smbXsrv_connection *xconn = state->xconn;
1621 tevent_queue_wait_recv(subreq);
1622 TALLOC_FREE(subreq);
1624 tevent_req_done(req);
1626 * make sure the xconn pointer is still valid,
1627 * it should as we used tevent_req_defer_callback()
1629 SMB_ASSERT(xconn->transport.terminating);
/*
 * Receive side of smbXsrv_connection_shutdown_send(); returns the
 * request's final NTSTATUS after re-validating the xconn pointer.
 */
1632 static NTSTATUS smbXsrv_connection_shutdown_recv(struct tevent_req *req)
1634 struct smbXsrv_connection_shutdown_state *state =
1635 tevent_req_data(req,
1636 struct smbXsrv_connection_shutdown_state);
1637 struct smbXsrv_connection *xconn = state->xconn;
1639 * make sure the xconn pointer is still valid,
1640 * it should as we used tevent_req_defer_callback()
1642 SMB_ASSERT(xconn->transport.terminating);
1643 return tevent_req_simple_recv_ntstatus(req);
/*
 * Shutdown-finished callback used by smbd_server_connection_terminate_ex():
 * collects the shutdown result (failure is fatal to the whole server
 * process) and unlinks the connection from its client.
 */
1646 static void smbd_server_connection_terminate_done(struct tevent_req *subreq)
1648 struct smbXsrv_connection *xconn =
1649 tevent_req_callback_data(subreq,
1650 struct smbXsrv_connection);
1651 struct smbXsrv_client *client = xconn->client;
1654 status = smbXsrv_connection_shutdown_recv(subreq);
1655 if (!NT_STATUS_IS_OK(status)) {
1656 exit_server("smbXsrv_connection_shutdown_recv failed");
1659 DLIST_REMOVE(client->connections, xconn);
/*
 * Terminate a single connection of a (possibly multi-channel) client.
 * Disconnects the transport (idempotent), guards against recursion via
 * transport.terminating, and then either disconnects the whole client
 * (ctdb public IP case), shuts down just this connection async, or —
 * when it was the last valid connection — exits the process cleanly.
 */
1663 void smbd_server_connection_terminate_ex(struct smbXsrv_connection *xconn,
1665 const char *location)
1667 struct smbXsrv_client *client = xconn->client;
1671 * Make sure that no new request will be able to use this session.
1673 * smbXsrv_connection_disconnect_transport() might be called already,
1674 * but calling it again is a no-op.
1676 smbXsrv_connection_disconnect_transport(xconn,
1677 NT_STATUS_CONNECTION_DISCONNECTED);
1679 num_ok = smbXsrv_client_valid_connections(client);
/* Re-entrant call while a shutdown is already in flight: just log. */
1681 if (xconn->transport.terminating) {
1682 DBG_DEBUG("skip recursion conn[%s] num_ok[%zu] reason[%s] at %s\n",
1683 smbXsrv_connection_dbg(xconn), num_ok,
1687 xconn->transport.terminating = true;
1689 DBG_DEBUG("conn[%s] num_ok[%zu] reason[%s] at %s\n",
1690 smbXsrv_connection_dbg(xconn), num_ok,
1693 if (xconn->has_ctdb_public_ip) {
1695 * If the connection has a ctdb public address
1696 * we disconnect all client connections,
1697 * as the public address might be moved to
1700 * In future we may recheck which node currently
1701 * holds this address, but for now we keep it simple.
1703 smbd_server_disconnect_client_ex(xconn->client,
1710 struct tevent_req *subreq = NULL;
1712 subreq = smbXsrv_connection_shutdown_send(client,
1715 if (subreq == NULL) {
1716 exit_server("smbXsrv_connection_shutdown_send failed");
1718 tevent_req_set_callback(subreq,
1719 smbd_server_connection_terminate_done,
1725 * The last connection was disconnected
1727 exit_server_cleanly(reason);
/*
 * Disconnect the whole client (all its connections) and exit the
 * process.  Logged at warning level with the caller's location string.
 */
1730 void smbd_server_disconnect_client_ex(struct smbXsrv_client *client,
1732 const char *location)
1736 num_ok = smbXsrv_client_valid_connections(client);
1738 DBG_WARNING("client[%s] num_ok[%zu] reason[%s] at %s\n",
1739 client->global->remote_address, num_ok,
1743 * Something bad happened we need to disconnect all connections.
1745 exit_server_cleanly(reason);
/*
 * Deep-copy one 4-iovec SMB2 response group (TF, HDR, BODY, DYN) from
 * srcvec into outvec, allocating the duplicates on @ctx.  When body/dyn
 * point at their conventional offsets inside the source header, the
 * copies point into the duplicated header instead of being separately
 * allocated.  Returns true on success, false on validation/alloc error.
 */
1748 static bool dup_smb2_vec4(TALLOC_CTX *ctx,
1749 struct iovec *outvec,
1750 const struct iovec *srcvec)
1752 const uint8_t *srctf;
1754 const uint8_t *srchdr;
1756 const uint8_t *srcbody;
1758 const uint8_t *expected_srcbody;
1759 const uint8_t *srcdyn;
1761 const uint8_t *expected_srcdyn;
1767 srctf = (const uint8_t *)srcvec[SMBD_SMB2_TF_IOV_OFS].iov_base;
1768 srctf_len = srcvec[SMBD_SMB2_TF_IOV_OFS].iov_len;
1769 srchdr = (const uint8_t *)srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_base;
1770 srchdr_len = srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_len;
1771 srcbody = (const uint8_t *)srcvec[SMBD_SMB2_BODY_IOV_OFS].iov_base;
1772 srcbody_len = srcvec[SMBD_SMB2_BODY_IOV_OFS].iov_len;
1773 expected_srcbody = srchdr + SMB2_HDR_BODY;
1774 srcdyn = (const uint8_t *)srcvec[SMBD_SMB2_DYN_IOV_OFS].iov_base;
1775 srcdyn_len = srcvec[SMBD_SMB2_DYN_IOV_OFS].iov_len;
1776 expected_srcdyn = srcbody + 8;
/* The transform (TF) iovec is either absent or a full TF header. */
1778 if ((srctf_len != SMB2_TF_HDR_SIZE) && (srctf_len != 0)) {
1782 if (srchdr_len != SMB2_HDR_BODY) {
1786 if (srctf_len == SMB2_TF_HDR_SIZE) {
1787 dsttf = talloc_memdup(ctx, srctf, SMB2_TF_HDR_SIZE);
1788 if (dsttf == NULL) {
1794 outvec[SMBD_SMB2_TF_IOV_OFS].iov_base = (void *)dsttf;
1795 outvec[SMBD_SMB2_TF_IOV_OFS].iov_len = srctf_len;
1797 /* vec[SMBD_SMB2_HDR_IOV_OFS] is always boilerplate and must
1798 * be allocated with size OUTVEC_ALLOC_SIZE. */
1800 dsthdr = talloc_memdup(ctx, srchdr, OUTVEC_ALLOC_SIZE);
1801 if (dsthdr == NULL) {
1804 outvec[SMBD_SMB2_HDR_IOV_OFS].iov_base = (void *)dsthdr;
1805 outvec[SMBD_SMB2_HDR_IOV_OFS].iov_len = SMB2_HDR_BODY;
1808 * If this is a "standard" vec[SMBD_SMB2_BODY_IOV_OFS] of length 8,
1809 * pointing to srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_base + SMB2_HDR_BODY,
1810 * then duplicate this. Else use talloc_memdup().
1813 if ((srcbody == expected_srcbody) && (srcbody_len == 8)) {
1814 dstbody = dsthdr + SMB2_HDR_BODY;
1816 dstbody = talloc_memdup(ctx, srcbody, srcbody_len);
1817 if (dstbody == NULL) {
1821 outvec[SMBD_SMB2_BODY_IOV_OFS].iov_base = (void *)dstbody;
1822 outvec[SMBD_SMB2_BODY_IOV_OFS].iov_len = srcbody_len;
1825 * If this is a "standard" vec[SMBD_SMB2_DYN_IOV_OFS] of length 1,
1827 * srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_base + 8
1828 * then duplicate this. Else use talloc_memdup().
1831 if ((srcdyn == expected_srcdyn) && (srcdyn_len == 1)) {
1832 dstdyn = dsthdr + SMB2_HDR_BODY + 8;
1833 } else if (srcdyn == NULL) {
1836 dstdyn = talloc_memdup(ctx, srcdyn, srcdyn_len);
1837 if (dstdyn == NULL) {
1841 outvec[SMBD_SMB2_DYN_IOV_OFS].iov_base = (void *)dstdyn;
1842 outvec[SMBD_SMB2_DYN_IOV_OFS].iov_len = srcdyn_len;
/*
 * Duplicate a request's outgoing state into a fresh smbd_smb2_request:
 * copies session/crypto flags, the NBT header, and every 4-iovec reply
 * group via dup_smb2_vec4(), then recomputes the NBT length.  Returns
 * NULL on any failure (the partially built request is freed).
 */
1847 static struct smbd_smb2_request *dup_smb2_req(const struct smbd_smb2_request *req)
1849 struct smbd_smb2_request *newreq = NULL;
1850 struct iovec *outvec = NULL;
1851 int count = req->out.vector_count;
1855 newreq = smbd_smb2_request_allocate(req->xconn);
1860 newreq->sconn = req->sconn;
1861 newreq->xconn = req->xconn;
1862 newreq->session = req->session;
1863 newreq->do_encryption = req->do_encryption;
1864 newreq->do_signing = req->do_signing;
1865 newreq->current_idx = req->current_idx;
1867 outvec = talloc_zero_array(newreq, struct iovec, count);
1869 TALLOC_FREE(newreq);
1872 newreq->out.vector = outvec;
1873 newreq->out.vector_count = count;
1875 /* Setup the outvec's identically to req. */
1876 outvec[0].iov_base = newreq->out.nbt_hdr;
1877 outvec[0].iov_len = 4;
1878 memcpy(newreq->out.nbt_hdr, req->out.nbt_hdr, 4);
1880 /* Setup the vectors identically to the ones in req. */
1881 for (i = 1; i < count; i += SMBD_SMB2_NUM_IOV_PER_REQ) {
1882 if (!dup_smb2_vec4(outvec, &outvec[i], &req->out.vector[i])) {
1889 TALLOC_FREE(newreq);
/* Recompute the 4-byte NBT length prefix over the copied vectors. */
1893 ok = smb2_setup_nbt_length(newreq->out.vector,
1894 newreq->out.vector_count);
1896 TALLOC_FREE(newreq);
/*
 * Send an interim response for the already-processed part of a compound
 * chain when one of its later operations goes async: duplicate the
 * request, drop the trailing (still pending) reply vectors, terminate
 * the chain, re-sign or re-encrypt with the remembered first/last key,
 * and queue the result for sending.
 */
1903 static NTSTATUS smb2_send_async_interim_response(const struct smbd_smb2_request *req)
1905 struct smbXsrv_connection *xconn = req->xconn;
1907 struct iovec *firsttf = NULL;
1908 struct iovec *outhdr_v = NULL;
1909 uint8_t *outhdr = NULL;
1910 struct smbd_smb2_request *nreq = NULL;
1914 /* Create a new smb2 request we'll use
1915 for the interim return. */
1916 nreq = dup_smb2_req(req);
1918 return NT_STATUS_NO_MEMORY;
1921 /* Lose the last X out vectors. They're the
1922 ones we'll be using for the async reply. */
1923 nreq->out.vector_count -= SMBD_SMB2_NUM_IOV_PER_REQ;
1925 ok = smb2_setup_nbt_length(nreq->out.vector,
1926 nreq->out.vector_count);
1928 return NT_STATUS_INVALID_PARAMETER_MIX;
1931 /* Step back to the previous reply. */
1932 nreq->current_idx -= SMBD_SMB2_NUM_IOV_PER_REQ;
1933 firsttf = SMBD_SMB2_IDX_TF_IOV(nreq,out,first_idx);
1934 outhdr_v = SMBD_SMB2_OUT_HDR_IOV(nreq);
1935 outhdr = SMBD_SMB2_OUT_HDR_PTR(nreq);
1936 /* And end the chain. */
1937 SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, 0);
1939 /* Calculate outgoing credits */
1940 smb2_calculate_credits(req, nreq);
/*
 * NOTE(review): "DEBUGLEVEL >= 0" makes this debug dump unconditional;
 * it looks like it should be a high level (e.g. >= 10) — confirm.
 */
1942 if (DEBUGLEVEL >= 0) {
1943 dbgtext("smb2_send_async_interim_response: nreq->current_idx = %u\n",
1944 (unsigned int)nreq->current_idx );
1945 dbgtext("smb2_send_async_interim_response: returning %u vectors\n",
1946 (unsigned int)nreq->out.vector_count );
1947 print_req_vectors(nreq);
1951 * As we have changed the header (SMB2_HDR_NEXT_COMMAND),
1952 * we need to sign/encrypt here with the last/first key we remembered
1954 if (firsttf->iov_len == SMB2_TF_HDR_SIZE) {
1955 struct smb2_signing_key key = {
1956 .blob = req->first_key,
1958 status = smb2_signing_encrypt_pdu(&key,
1959 xconn->smb2.server.cipher,
1961 nreq->out.vector_count - first_idx);
1962 smb2_signing_key_destructor(&key);
1963 if (!NT_STATUS_IS_OK(status)) {
1966 } else if (req->last_key.length > 0) {
1967 struct smb2_signing_key key = {
1968 .blob = req->last_key,
1971 status = smb2_signing_sign_pdu(&key,
1974 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
1975 smb2_signing_key_destructor(&key);
1976 if (!NT_STATUS_IS_OK(status)) {
/* Queue the interim reply and flush immediately. */
1981 nreq->queue_entry.mem_ctx = nreq;
1982 nreq->queue_entry.vector = nreq->out.vector;
1983 nreq->queue_entry.count = nreq->out.vector_count;
1984 nreq->queue_entry.xconn = xconn;
1985 DLIST_ADD_END(xconn->smb2.send_queue, &nreq->queue_entry);
1986 xconn->smb2.send_queue_len++;
/* NOTE(review): DBG_ERR for a routine queue op looks like leftover
 * debugging — confirm the intended log level. */
1988 DBG_ERR("queue e[%p]\n", &nreq->queue_entry);
1989 status = smbd_smb2_flush_send_queue(xconn);
1990 if (!NT_STATUS_IS_OK(status)) {
1994 return NT_STATUS_OK;
/*
 * Self-contained buffer + iovec set for a STATUS_PENDING interim reply:
 * NBT header, optional transform header, SMB2 header, 8-byte body and
 * 1 dynamic byte, plus the queue entry that owns them while sending.
 */
1997 struct smbd_smb2_request_pending_state {
1998 struct smbd_smb2_send_queue queue_entry;
1999 uint8_t buf[NBT_HDR_SIZE + SMB2_TF_HDR_SIZE + SMB2_HDR_BODY + 0x08 + 1];
2000 struct iovec vector[1 + SMBD_SMB2_NUM_IOV_PER_REQ];
2003 static void smbd_smb2_request_pending_timer(struct tevent_context *ev,
2004 struct tevent_timer *te,
2005 struct timeval current_time,
2006 void *private_data);
/*
 * Arrange for @req to go async: if @subreq already finished we short-
 * circuit; otherwise, unless the request is internal-async or already
 * async, enforce the compound-chain rules from [MS-SMB2] <206>, send an
 * interim response for any already-processed compound part, and start a
 * timer that will emit the STATUS_PENDING reply after @defer_time usecs.
 */
2008 NTSTATUS smbd_smb2_request_pending_queue(struct smbd_smb2_request *req,
2009 struct tevent_req *subreq,
2010 uint32_t defer_time)
2013 struct timeval defer_endtime;
2014 uint8_t *outhdr = NULL;
2017 if (!tevent_req_is_in_progress(subreq)) {
2019 * This is a performance optimization,
2020 * it avoids one tevent_loop iteration,
2021 * which means we avoid one
2022 * talloc_stackframe_pool/talloc_free pair.
2024 tevent_req_notify_callback(subreq);
2025 return NT_STATUS_OK;
2028 req->subreq = subreq;
2031 if (req->async_te) {
2032 /* We're already async. */
2033 return NT_STATUS_OK;
2036 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
2037 flags = IVAL(outhdr, SMB2_HDR_FLAGS);
2038 if (flags & SMB2_HDR_FLAG_ASYNC) {
2039 /* We're already async. */
2040 return NT_STATUS_OK;
2043 if (req->async_internal || defer_time == 0) {
2045 * An SMB2 request implementation wants to handle the request
2046 * asynchronously "internally" while keeping synchronous
2047 * behaviour for the SMB2 request. This means we don't send an
2048 * interim response and we can allow processing of compound SMB2
2049 * requests (cf the subsequent check) for all cases.
2051 return NT_STATUS_OK;
2054 if (req->in.vector_count > req->current_idx + SMBD_SMB2_NUM_IOV_PER_REQ) {
2056 * We're trying to go async in a compound request
2057 * chain. This is only allowed for opens that cause an
2058 * oplock break or for the last operation in the
2059 * chain, otherwise it is not allowed. See
2060 * [MS-SMB2].pdf note <206> on Section 3.3.5.2.7.
2062 const uint8_t *inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2064 if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_CREATE) {
2066 * Cancel the outstanding request.
2068 bool ok = tevent_req_cancel(req->subreq);
2070 return NT_STATUS_OK;
2072 TALLOC_FREE(req->subreq);
2073 return smbd_smb2_request_error(req,
2074 NT_STATUS_INTERNAL_ERROR);
2078 if (DEBUGLEVEL >= 10) {
2079 dbgtext("smbd_smb2_request_pending_queue: req->current_idx = %u\n",
2080 (unsigned int)req->current_idx );
2081 print_req_vectors(req);
2084 if (req->current_idx > 1) {
2086 * We're going async in a compound
2087 * chain after the first request has
2088 * already been processed. Send an
2089 * interim response containing the
2090 * set of replies already generated.
2092 int idx = req->current_idx;
2094 status = smb2_send_async_interim_response(req);
2095 if (!NT_STATUS_IS_OK(status)) {
2098 if (req->first_key.length > 0) {
2099 data_blob_clear_free(&req->first_key);
2102 req->current_idx = 1;
2105 * Re-arrange the in.vectors to remove what
2108 memmove(&req->in.vector[1],
2109 &req->in.vector[idx],
2110 sizeof(req->in.vector[0])*(req->in.vector_count - idx));
2111 req->in.vector_count = 1 + (req->in.vector_count - idx);
2113 /* Re-arrange the out.vectors to match. */
2114 memmove(&req->out.vector[1],
2115 &req->out.vector[idx],
2116 sizeof(req->out.vector[0])*(req->out.vector_count - idx));
2117 req->out.vector_count = 1 + (req->out.vector_count - idx);
2119 if (req->in.vector_count == 1 + SMBD_SMB2_NUM_IOV_PER_REQ) {
2121 * We only have one remaining request as
2122 * we've processed everything else.
2123 * This is no longer a compound request.
2125 req->compound_related = false;
2126 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
2127 flags = (IVAL(outhdr, SMB2_HDR_FLAGS) & ~SMB2_HDR_FLAG_CHAINED);
2128 SIVAL(outhdr, SMB2_HDR_FLAGS, flags);
2131 if (req->last_key.length > 0) {
2132 data_blob_clear_free(&req->last_key);
2136 * smbd_smb2_request_pending_timer() just send a packet
2137 * to the client and doesn't need any impersonation.
2138 * So we use req->xconn->client->raw_ev_ctx instead
2139 * of req->ev_ctx here.
2141 defer_endtime = timeval_current_ofs_usec(defer_time);
2142 req->async_te = tevent_add_timer(req->xconn->client->raw_ev_ctx,
2144 smbd_smb2_request_pending_timer,
2146 if (req->async_te == NULL) {
2147 return NT_STATUS_NO_MEMORY;
2150 return NT_STATUS_OK;
/*
 * Return the signing key to use for @session on @xconn: prefer the
 * per-channel key if a channel lookup succeeds, falling back to the
 * session's global signing key when the channel key is not valid.
 */
2154 struct smb2_signing_key *smbd_smb2_signing_key(struct smbXsrv_session *session,
2155 struct smbXsrv_connection *xconn)
2157 struct smbXsrv_channel_global0 *c = NULL;
2159 struct smb2_signing_key *key = NULL;
2161 status = smbXsrv_session_find_channel(session, xconn, &c);
2162 if (NT_STATUS_IS_OK(status)) {
2163 key = c->signing_key;
2166 if (!smb2_signing_key_valid(key)) {
2167 key = session->global->signing_key;
/*
 * Produce the next unique encryption nonce for @session as a 128-bit
 * (high, low) pair.  Increments the session counters, refuses to let
 * the nonce space wrap (CCM/GCM nonce reuse would break the crypto),
 * and mixes in the per-session random high part.
 */
2173 static NTSTATUS smb2_get_new_nonce(struct smbXsrv_session *session,
2174 uint64_t *new_nonce_high,
2175 uint64_t *new_nonce_low)
2177 uint64_t nonce_high;
/* low counter skips 0 so (high,low) never repeats after carry. */
2180 session->nonce_low += 1;
2181 if (session->nonce_low == 0) {
2182 session->nonce_low += 1;
2183 session->nonce_high += 1;
2187 * CCM and GCM algorithms must never have their
2188 * nonce wrap, or the security of the whole
2189 * communication and the keys is destroyed.
2190 * We must drop the connection once we have
2191 * transferred too much data.
2193 * NOTE: We assume nonces greater than 8 bytes.
2195 if (session->nonce_high >= session->nonce_high_max) {
2196 return NT_STATUS_ENCRYPTION_FAILED;
2199 nonce_high = session->nonce_high_random;
2200 nonce_high += session->nonce_high;
2201 nonce_low = session->nonce_low;
2203 *new_nonce_high = nonce_high;
2204 *new_nonce_low = nonce_low;
2205 return NT_STATUS_OK;
/*
 * Timer callback armed by smbd_smb2_request_pending_queue(): builds and
 * queues the STATUS_PENDING interim reply for @private_data's request.
 * Assigns an async id (currently the message id), grants credits now,
 * and signs or encrypts the interim packet as the request demands.
 * Any failure terminates the connection.
 */
2208 static void smbd_smb2_request_pending_timer(struct tevent_context *ev,
2209 struct tevent_timer *te,
2210 struct timeval current_time,
2213 struct smbd_smb2_request *req =
2214 talloc_get_type_abort(private_data,
2215 struct smbd_smb2_request);
2216 struct smbXsrv_connection *xconn = req->xconn;
2217 struct smbd_smb2_request_pending_state *state = NULL;
2218 uint8_t *outhdr = NULL;
2219 const uint8_t *inhdr = NULL;
2221 uint8_t *hdr = NULL;
2222 uint8_t *body = NULL;
2223 uint8_t *dyn = NULL;
2225 uint64_t message_id = 0;
2226 uint64_t async_id = 0;
/* One-shot timer: drop it so the request is no longer "pending timer". */
2230 TALLOC_FREE(req->async_te);
2232 /* Ensure our final reply matches the interim one. */
2233 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2234 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
2235 flags = IVAL(outhdr, SMB2_HDR_FLAGS);
2236 message_id = BVAL(outhdr, SMB2_HDR_MESSAGE_ID);
2238 async_id = message_id; /* keep it simple for now... */
2240 SIVAL(outhdr, SMB2_HDR_FLAGS, flags | SMB2_HDR_FLAG_ASYNC);
2241 SBVAL(outhdr, SMB2_HDR_ASYNC_ID, async_id);
/* NOTE(review): DEBUG(0,...) logs this on every async request; upstream
 * uses level 10 here — confirm the intended level. */
2243 DEBUG(0,("smbd_smb2_request_pending_queue: opcode[%s] mid %llu "
2245 smb2_opcode_name(SVAL(inhdr, SMB2_HDR_OPCODE)),
2246 (unsigned long long)async_id ));
2249 * What we send is identical to a smbd_smb2_request_error
2250 * packet with an error status of STATUS_PENDING. Make use
2251 * of this fact sometime when refactoring. JRA.
2254 state = talloc_zero(req->xconn, struct smbd_smb2_request_pending_state);
2255 if (state == NULL) {
2256 smbd_server_connection_terminate(xconn,
2257 nt_errstr(NT_STATUS_NO_MEMORY));
/* Lay out TF / SMB2 header / body / dyn inside the fixed buffer. */
2261 tf = state->buf + NBT_HDR_SIZE;
2263 hdr = tf + SMB2_TF_HDR_SIZE;
2264 body = hdr + SMB2_HDR_BODY;
2267 if (req->do_encryption) {
2268 uint64_t nonce_high = 0;
2269 uint64_t nonce_low = 0;
2270 uint64_t session_id = req->session->global->session_wire_id;
2272 status = smb2_get_new_nonce(req->session,
2275 if (!NT_STATUS_IS_OK(status)) {
2276 smbd_server_connection_terminate(xconn,
2281 SIVAL(tf, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
2282 SBVAL(tf, SMB2_TF_NONCE+0, nonce_low);
2283 SBVAL(tf, SMB2_TF_NONCE+8, nonce_high);
2284 SBVAL(tf, SMB2_TF_SESSION_ID, session_id);
/* Build the STATUS_PENDING SMB2 header mirroring the real reply. */
2287 SIVAL(hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2288 SSVAL(hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2289 SSVAL(hdr, SMB2_HDR_EPOCH, 0);
2290 SIVAL(hdr, SMB2_HDR_STATUS, NT_STATUS_V(NT_STATUS_PENDING));
2291 SSVAL(hdr, SMB2_HDR_OPCODE, SVAL(outhdr, SMB2_HDR_OPCODE));
2293 SIVAL(hdr, SMB2_HDR_FLAGS, flags);
2294 SIVAL(hdr, SMB2_HDR_NEXT_COMMAND, 0);
2295 SBVAL(hdr, SMB2_HDR_MESSAGE_ID, message_id);
2296 SBVAL(hdr, SMB2_HDR_PID, async_id);
2297 SBVAL(hdr, SMB2_HDR_SESSION_ID,
2298 BVAL(outhdr, SMB2_HDR_SESSION_ID));
2299 memcpy(hdr+SMB2_HDR_SIGNATURE,
2300 outhdr+SMB2_HDR_SIGNATURE, 16);
2302 SSVAL(body, 0x00, 0x08 + 1);
2304 SCVAL(body, 0x02, 0);
2305 SCVAL(body, 0x03, 0);
2306 SIVAL(body, 0x04, 0);
2307 /* Match W2K8R2... */
2308 SCVAL(dyn, 0x00, 0x21);
2310 state->vector[0].iov_base = (void *)state->buf;
2311 state->vector[0].iov_len = NBT_HDR_SIZE;
2313 if (req->do_encryption) {
2314 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_base = tf;
2315 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_len =
2318 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_base = NULL;
2319 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_len = 0;
2322 state->vector[1+SMBD_SMB2_HDR_IOV_OFS].iov_base = hdr;
2323 state->vector[1+SMBD_SMB2_HDR_IOV_OFS].iov_len = SMB2_HDR_BODY;
2325 state->vector[1+SMBD_SMB2_BODY_IOV_OFS].iov_base = body;
2326 state->vector[1+SMBD_SMB2_BODY_IOV_OFS].iov_len = 8;
2328 state->vector[1+SMBD_SMB2_DYN_IOV_OFS].iov_base = dyn;
2329 state->vector[1+SMBD_SMB2_DYN_IOV_OFS].iov_len = 1;
2331 ok = smb2_setup_nbt_length(state->vector,
2332 1 + SMBD_SMB2_NUM_IOV_PER_REQ);
2334 smbd_server_connection_terminate(
2335 xconn, nt_errstr(NT_STATUS_INTERNAL_ERROR));
2339 /* Ensure we correctly go through crediting. Grant
2340 the credits now, and zero credits on the final
2342 smb2_set_operation_credit(req->xconn,
2343 SMBD_SMB2_IN_HDR_IOV(req),
2344 &state->vector[1+SMBD_SMB2_HDR_IOV_OFS]);
2346 SIVAL(hdr, SMB2_HDR_FLAGS, flags | SMB2_HDR_FLAG_ASYNC);
2351 for (i = 0; i < ARRAY_SIZE(state->vector); i++) {
2352 dbgtext("\tstate->vector[%u/%u].iov_len = %u\n",
2354 (unsigned int)ARRAY_SIZE(state->vector),
2355 (unsigned int)state->vector[i].iov_len);
/* Encrypt (whole TF payload) or sign (header onward), as required. */
2359 if (req->do_encryption) {
2360 struct smbXsrv_session *x = req->session;
2361 struct smb2_signing_key *encryption_key = x->global->encryption_key;
2363 status = smb2_signing_encrypt_pdu(encryption_key,
2364 xconn->smb2.server.cipher,
2365 &state->vector[1+SMBD_SMB2_TF_IOV_OFS],
2366 SMBD_SMB2_NUM_IOV_PER_REQ);
2367 if (!NT_STATUS_IS_OK(status)) {
2368 smbd_server_connection_terminate(xconn,
2372 } else if (req->do_signing) {
2373 struct smbXsrv_session *x = req->session;
2374 struct smb2_signing_key *signing_key =
2375 smbd_smb2_signing_key(x, xconn);
2377 status = smb2_signing_sign_pdu(signing_key,
2379 &state->vector[1+SMBD_SMB2_HDR_IOV_OFS],
2380 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
2381 if (!NT_STATUS_IS_OK(status)) {
2382 smbd_server_connection_terminate(xconn,
/* Queue the interim packet; state is freed when it has been sent. */
2388 state->queue_entry.mem_ctx = state;
2389 state->queue_entry.vector = state->vector;
2390 state->queue_entry.count = ARRAY_SIZE(state->vector);
2391 state->queue_entry.xconn = xconn;
2392 DLIST_ADD_END(xconn->smb2.send_queue, &state->queue_entry);
2393 xconn->smb2.send_queue_len++;
/* NOTE(review): DBG_ERR for a routine queue op looks like leftover
 * debugging — confirm the intended log level. */
2395 DBG_ERR("queue e[%p]\n", &state->queue_entry);
2396 status = smbd_smb2_flush_send_queue(xconn);
2397 if (!NT_STATUS_IS_OK(status)) {
2398 smbd_server_connection_terminate(xconn,
/*
 * Handle an SMB2 CANCEL: find the in-flight request matching either the
 * async id (if the ASYNC flag is set) or the message id, and cancel its
 * pending subreq.  Requests inside compound chains are never cancelled.
 * The cancel request itself is removed from the list; the caller frees it.
 */
2404 static NTSTATUS smbd_smb2_request_process_cancel(struct smbd_smb2_request *req)
2406 struct smbXsrv_connection *xconn = req->xconn;
2407 struct smbd_smb2_request *cur;
2408 const uint8_t *inhdr;
2410 uint64_t search_message_id;
2411 uint64_t search_async_id;
2414 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2416 flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2417 search_message_id = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2418 search_async_id = BVAL(inhdr, SMB2_HDR_PID);
2421 * We don't need the request anymore cancel requests never
2424 * We defer the TALLOC_FREE(req) to the caller.
2426 DLIST_REMOVE(xconn->smb2.requests, req);
2428 for (cur = xconn->smb2.requests; cur; cur = cur->next) {
2429 const uint8_t *outhdr;
2430 uint64_t message_id;
2433 if (cur->compound_related) {
2435 * Never cancel anything in a compound request.
2436 * Way too hard to deal with the result.
2441 outhdr = SMBD_SMB2_OUT_HDR_PTR(cur);
2443 message_id = BVAL(outhdr, SMB2_HDR_MESSAGE_ID);
2444 async_id = BVAL(outhdr, SMB2_HDR_PID);
/* ASYNC flag selects which id space the client is cancelling by. */
2446 if (flags & SMB2_HDR_FLAG_ASYNC) {
2447 if (search_async_id == async_id) {
2448 found_id = async_id;
2452 if (search_message_id == message_id) {
2453 found_id = message_id;
2459 if (cur && cur->subreq) {
2460 inhdr = SMBD_SMB2_IN_HDR_PTR(cur);
2461 DEBUG(10,("smbd_smb2_request_process_cancel: attempting to "
2462 "cancel opcode[%s] mid %llu\n",
2463 smb2_opcode_name(SVAL(inhdr, SMB2_HDR_OPCODE)),
2464 (unsigned long long)found_id ));
2465 tevent_req_cancel(cur->subreq);
2468 return NT_STATUS_OK;
2471 /*************************************************************
2472 Ensure an incoming tid is a valid one for us to access.
2473 Change to the associated uid credentials and chdir to the
2474 valid tid directory.
2475 *************************************************************/
/*
 * Validate the tree id of the current request (inheriting the previous
 * tid inside a compound chain), look the tcon up on the session, and
 * switch to the tcon's user/service context.  Caches the tid in
 * req->last_tid for subsequent chained requests.
 */
2477 static NTSTATUS smbd_smb2_request_check_tcon(struct smbd_smb2_request *req)
2479 const uint8_t *inhdr;
2482 struct smbXsrv_tcon *tcon;
2484 NTTIME now = timeval_to_nttime(&req->request_time);
2488 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2490 in_flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2491 in_tid = IVAL(inhdr, SMB2_HDR_TID);
/* Chained compound requests reuse the tid of the previous request. */
2493 if (in_flags & SMB2_HDR_FLAG_CHAINED) {
2494 in_tid = req->last_tid;
2499 status = smb2srv_tcon_lookup(req->session,
2500 in_tid, now, &tcon);
2501 if (!NT_STATUS_IS_OK(status)) {
2505 if (!change_to_user_and_service(
2507 req->session->global->session_wire_id))
2509 return NT_STATUS_ACCESS_DENIED;
2513 req->last_tid = in_tid;
2515 return NT_STATUS_OK;
2518 /*************************************************************
2519 Ensure an incoming session_id is a valid one for us to access.
2520 *************************************************************/
/*
 * Validate the session id of the current request (inheriting the
 * previous id inside a compound chain) and attach the session to @req.
 * SESSSETUP uses a client-wide lookup (channel not bound yet); expired
 * sessions are still allowed for LOGOFF/CLOSE/(UN)LOCK/CANCEL/ECHO per
 * [MS-SMB2] 3.3.5.2.9, and partially-set-up sessions are allowed except
 * for handle-based ops (CREATE/GETINFO/SETINFO).
 */
2522 static NTSTATUS smbd_smb2_request_check_session(struct smbd_smb2_request *req)
2524 const uint8_t *inhdr;
2527 uint64_t in_session_id;
2528 struct smbXsrv_session *session = NULL;
2529 struct auth_session_info *session_info;
2531 NTTIME now = timeval_to_nttime(&req->request_time);
2533 req->session = NULL;
2536 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2538 in_flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2539 in_opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2540 in_session_id = BVAL(inhdr, SMB2_HDR_SESSION_ID);
/* Chained compound requests reuse the previous session id. */
2542 if (in_flags & SMB2_HDR_FLAG_CHAINED) {
2543 in_session_id = req->last_session_id;
2546 req->last_session_id = 0;
2548 /* look an existing session up */
2549 switch (in_opcode) {
2550 case SMB2_OP_SESSSETUP:
2552 * For a session bind request, we don't have the
2553 * channel set up at this point yet, so we defer
2554 * the verification that the connection belongs
2555 * to the session to the session setup code, which
2556 * can look at the session binding flags.
2558 status = smb2srv_session_lookup_client(req->xconn->client,
2563 status = smb2srv_session_lookup_conn(req->xconn,
2569 req->session = session;
2570 req->last_session_id = in_session_id;
2572 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
2573 switch (in_opcode) {
2574 case SMB2_OP_SESSSETUP:
2575 status = NT_STATUS_OK;
2577 case SMB2_OP_LOGOFF:
2580 case SMB2_OP_CANCEL:
2581 case SMB2_OP_KEEPALIVE:
2583 * [MS-SMB2] 3.3.5.2.9 Verifying the Session
2584 * specifies that LOGOFF, CLOSE and (UN)LOCK
2585 * should always be processed even on expired sessions.
2587 * Also see the logic in
2588 * smbd_smb2_request_process_lock().
2590 * The smb2.session.expire2 test shows that
2591 * CANCEL and KEEPALIVE/ECHO should also
2594 status = NT_STATUS_OK;
2600 if (NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
2601 switch (in_opcode) {
2603 case SMB2_OP_CREATE:
2604 case SMB2_OP_GETINFO:
2605 case SMB2_OP_SETINFO:
2606 return NT_STATUS_INVALID_HANDLE;
2609 * Notice the check for
2610 * (session_info == NULL)
2613 status = NT_STATUS_OK;
2617 if (!NT_STATUS_IS_OK(status)) {
2621 session_info = session->global->auth_session_info;
2622 if (session_info == NULL) {
2623 return NT_STATUS_INVALID_HANDLE;
2626 return NT_STATUS_OK;
/*
 * Verify the request's CreditCharge covers @data_length: each credit
 * pays for up to 64 KiB, so needed = ceil(data_length / 65536).  When
 * multi-credit is off the charge is treated as 1.  Returns
 * NT_STATUS_INVALID_PARAMETER when the client under-charged.
 */
2629 NTSTATUS smbd_smb2_request_verify_creditcharge(struct smbd_smb2_request *req,
2630 uint32_t data_length)
2632 struct smbXsrv_connection *xconn = req->xconn;
2633 uint16_t needed_charge;
2634 uint16_t credit_charge = 1;
2635 const uint8_t *inhdr;
2637 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2639 if (xconn->smb2.credits.multicredit) {
2640 credit_charge = SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE);
2641 credit_charge = MAX(credit_charge, 1);
/* ceil(data_length / 64KiB); note (0-1)/65536+1 == 1 for length 0. */
2644 needed_charge = (data_length - 1)/ 65536 + 1;
2646 DBGC_DEBUG(DBGC_SMB2_CREDITS,
2647 "mid %llu, CreditCharge: %d, NeededCharge: %d\n",
2648 (unsigned long long) BVAL(inhdr, SMB2_HDR_MESSAGE_ID),
2649 credit_charge, needed_charge);
2651 if (needed_charge > credit_charge) {
2652 DBGC_WARNING(DBGC_SMB2_CREDITS,
2653 "CreditCharge too low, given %d, needed %d\n",
2654 credit_charge, needed_charge);
2655 return NT_STATUS_INVALID_PARAMETER;
2658 return NT_STATUS_OK;
/*
 * Verify the request's fixed body size against @expected_body_size.
 * SMB2 encodes an odd expected size when the last body byte may live in
 * the dynamic part: the body length is compared with the size rounded
 * down to even, and an odd size requires at least 1 dynamic byte.
 * Internal-consistency violations return NT_STATUS_INTERNAL_ERROR,
 * client-visible mismatches NT_STATUS_INVALID_PARAMETER.
 */
2661 NTSTATUS smbd_smb2_request_verify_sizes(struct smbd_smb2_request *req,
2662 size_t expected_body_size)
2664 struct iovec *inhdr_v;
2665 const uint8_t *inhdr;
2667 const uint8_t *inbody;
/* Odd expected size => the trailing byte is carried in the dyn part. */
2669 size_t min_dyn_size = expected_body_size & 0x00000001;
2670 int max_idx = req->in.vector_count - SMBD_SMB2_NUM_IOV_PER_REQ;
2673 * The following should be checked already.
2675 if (req->in.vector_count < SMBD_SMB2_NUM_IOV_PER_REQ) {
2676 return NT_STATUS_INTERNAL_ERROR;
2678 if (req->current_idx > max_idx) {
2679 return NT_STATUS_INTERNAL_ERROR;
2682 inhdr_v = SMBD_SMB2_IN_HDR_IOV(req);
2683 if (inhdr_v->iov_len != SMB2_HDR_BODY) {
2684 return NT_STATUS_INTERNAL_ERROR;
2686 if (SMBD_SMB2_IN_BODY_LEN(req) < 2) {
2687 return NT_STATUS_INTERNAL_ERROR;
2690 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2691 opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2695 case SMB2_OP_GETINFO:
2702 * Now check the expected body size,
2703 * where the last byte might be in the
2706 if (SMBD_SMB2_IN_BODY_LEN(req) != (expected_body_size & 0xFFFFFFFE)) {
2707 return NT_STATUS_INVALID_PARAMETER;
2709 if (SMBD_SMB2_IN_DYN_LEN(req) < min_dyn_size) {
2710 return NT_STATUS_INVALID_PARAMETER;
2713 inbody = SMBD_SMB2_IN_BODY_PTR(req);
/* The wire StructureSize field must match exactly (odd value). */
2715 body_size = SVAL(inbody, 0x00);
2716 if (body_size != expected_body_size) {
2717 return NT_STATUS_INVALID_PARAMETER;
2720 return NT_STATUS_OK;
2723 bool smbXsrv_is_encrypted(uint8_t encryption_flags)
2725 return (!(encryption_flags & SMBXSRV_PROCESSED_UNENCRYPTED_PACKET)
2727 (encryption_flags & (SMBXSRV_PROCESSED_ENCRYPTED_PACKET |
2728 SMBXSRV_ENCRYPTION_DESIRED |
2729 SMBXSRV_ENCRYPTION_REQUIRED)));
2732 bool smbXsrv_is_partially_encrypted(uint8_t encryption_flags)
2734 return ((encryption_flags & SMBXSRV_PROCESSED_ENCRYPTED_PACKET) &&
2735 (encryption_flags & SMBXSRV_PROCESSED_UNENCRYPTED_PACKET));
/* Set a flag if not already set, return true if set */
bool smbXsrv_set_crypto_flag(uint8_t *flags, uint8_t flag)
{
	if (flag == 0) {
		/* Nothing to record. */
		return false;
	}
	if ((*flags & flag) != 0) {
		/* Flag was recorded earlier; no state change. */
		return false;
	}

	*flags |= flag;
	return true;
}
/*
 * Update encryption state tracking flags, this can be used to
 * determine whether the session or tcon is "encrypted".
 *
 * On return *update_session_globalp / *update_tcon_globalp tell the
 * caller whether the session/tcon global record changed and must be
 * written back (smbXsrv_session_update()/smbXsrv_tcon_update()).
 */
static void smb2srv_update_crypto_flags(struct smbd_smb2_request *req,
					uint16_t opcode,
					bool *update_session_globalp,
					bool *update_tcon_globalp)
{
	/* Default: assume unecrypted and unsigned */
	struct smbXsrv_session *session = req->session;
	struct smbXsrv_tcon *tcon = req->tcon;
	uint8_t encrypt_flag = SMBXSRV_PROCESSED_UNENCRYPTED_PACKET;
	uint8_t sign_flag = SMBXSRV_PROCESSED_UNSIGNED_PACKET;
	bool update_session = false;
	bool update_tcon = false;

	if (req->was_encrypted && req->do_encryption) {
		/* An encrypted packet implicitly counts as signed. */
		encrypt_flag = SMBXSRV_PROCESSED_ENCRYPTED_PACKET;
		sign_flag = SMBXSRV_PROCESSED_SIGNED_PACKET;
	} else {
		/* Unencrypted packet, can be signed */
		if (req->do_signing) {
			sign_flag = SMBXSRV_PROCESSED_SIGNED_PACKET;
		} else if (opcode == SMB2_OP_CANCEL) {
			/* Cancel requests are allowed to skip signing */
			sign_flag &= ~SMBXSRV_PROCESSED_UNSIGNED_PACKET;
		}
	}

	update_session |= smbXsrv_set_crypto_flag(
		&session->global->encryption_flags, encrypt_flag);
	update_session |= smbXsrv_set_crypto_flag(
		&session->global->signing_flags, sign_flag);

	/* A tcon is not yet bound for e.g. SESSSETUP/TCON requests. */
	if (tcon) {
		update_tcon |= smbXsrv_set_crypto_flag(
			&tcon->global->encryption_flags, encrypt_flag);
		update_tcon |= smbXsrv_set_crypto_flag(
			&tcon->global->signing_flags, sign_flag);
	}

	*update_session_globalp = update_session;
	*update_tcon_globalp = update_tcon;
	return;
}
2796 bool smbXsrv_is_signed(uint8_t signing_flags)
2799 * Signing is always enabled, so unless we got an unsigned
2800 * packet and at least one signed packet that was not
2801 * encrypted, the session or tcon is "signed".
2803 return (!(signing_flags & SMBXSRV_PROCESSED_UNSIGNED_PACKET) &&
2804 (signing_flags & SMBXSRV_PROCESSED_SIGNED_PACKET));
2807 bool smbXsrv_is_partially_signed(uint8_t signing_flags)
2809 return ((signing_flags & SMBXSRV_PROCESSED_UNSIGNED_PACKET) &&
2810 (signing_flags & SMBXSRV_PROCESSED_SIGNED_PACKET));
/*
 * [MS-SMB2] 3.3.5.2.10: track outstanding request counts per open
 * handle and per channel sequence number, so replayed operations on
 * multi-channel connections can be detected.
 *
 * modify_call: true for the modifying calls (write/set_info/ioctl)
 * which must fail with NT_STATUS_FILE_NOT_AVAILABLE on a stale csn.
 */
static NTSTATUS smbd_smb2_request_dispatch_update_counts(
				struct smbd_smb2_request *req,
				bool modify_call)
{
	struct smbXsrv_connection *xconn = req->xconn;
	const uint8_t *inhdr;
	uint16_t channel_sequence;
	uint8_t generation_wrap = 0;
	uint32_t flags;
	int cmp;
	struct smbXsrv_open *op;
	bool update_open = false;
	NTSTATUS status = NT_STATUS_OK;

	SMB_ASSERT(!req->request_counters_updated);

	/* Channel sequence tracking only exists for SMB >= 2.2.x/3.x. */
	if (xconn->protocol < PROTOCOL_SMB2_22) {
		return NT_STATUS_OK;
	}

	if (req->compat_chain_fsp == NULL) {
		return NT_STATUS_OK;
	}

	op = req->compat_chain_fsp->op;
	if (op == NULL) {
		return NT_STATUS_OK;
	}

	inhdr = SMBD_SMB2_IN_HDR_PTR(req);
	flags = IVAL(inhdr, SMB2_HDR_FLAGS);
	channel_sequence = SVAL(inhdr, SMB2_HDR_CHANNEL_SEQUENCE);

	cmp = channel_sequence - op->global->channel_sequence;
	if (cmp < 0) {
		/*
		 * csn wrap. We need to watch out for long-running
		 * requests that are still sitting on a previously
		 * used csn. SMB2_OP_NOTIFY can take VERY long.
		 */
		generation_wrap += 1;
	}

	if (abs(cmp) > INT16_MAX) {
		/*
		 * [MS-SMB2] 3.3.5.2.10 - Verifying the Channel Sequence Number:
		 *
		 * If the channel sequence number of the request and the one
		 * known to the server are not equal, the channel sequence
		 * number and outstanding request counts are only updated
		 * "... if the unsigned difference using 16-bit arithmetic
		 * between ChannelSequence and Open.ChannelSequence is less than
		 * or equal to 0x7FFF ...".
		 * Otherwise, an error is returned for the modifying
		 * calls write, set_info, and ioctl.
		 *
		 * There are currently two issues with the description:
		 *
		 * * For the other calls, the document seems to imply
		 *   that processing continues without adapting the
		 *   counters (if the sequence numbers are not equal).
		 *
		 *   TODO: This needs clarification!
		 *
		 * * Also, the behaviour if the difference is larger
		 *   than 0x7FFF is not clear. The document seems to
		 *   imply that if such a difference is reached,
		 *   the server starts to ignore the counters or
		 *   in the case of the modifying calls, return errors.
		 *
		 *   TODO: This needs clarification!
		 *
		 * At this point Samba tries to be a little more
		 * clever than the description in the MS-SMB2 document
		 * by heuristically detecting and properly treating
		 * a 16 bit overflow of the client-submitted sequence
		 * number:
		 *
		 * If the stored channel sequence number is more than
		 * 0x7FFF larger than the one from the request, then
		 * the client-provided sequence number has likely
		 * overflown. We treat this case as valid instead of
		 * invalid.
		 *
		 * The MS-SMB2 behaviour would be setting cmp = -1.
		 */
		cmp *= -1;
	}

	if (flags & SMB2_HDR_FLAG_REPLAY_OPERATION) {
		if (cmp == 0 && op->pre_request_count == 0) {
			/* Replay on the current csn: just count it. */
			op->request_count += 1;
			req->request_counters_updated = true;
		} else if (cmp > 0 && op->pre_request_count == 0) {
			/* Replay advanced the csn: rotate the counters. */
			op->pre_request_count += op->request_count;
			op->request_count = 1;
			op->global->channel_sequence = channel_sequence;
			op->global->channel_generation += generation_wrap;
			update_open = true;
			req->request_counters_updated = true;
		} else if (modify_call) {
			return NT_STATUS_FILE_NOT_AVAILABLE;
		}
	} else {
		if (cmp == 0) {
			op->request_count += 1;
			req->request_counters_updated = true;
		} else if (cmp > 0) {
			/* Client moved to a newer csn: rotate the counters. */
			op->pre_request_count += op->request_count;
			op->request_count = 1;
			op->global->channel_sequence = channel_sequence;
			op->global->channel_generation += generation_wrap;
			update_open = true;
			req->request_counters_updated = true;
		} else if (modify_call) {
			return NT_STATUS_FILE_NOT_AVAILABLE;
		}
	}
	req->channel_generation = op->global->channel_generation;

	if (update_open) {
		/* Persist the new csn/generation in the open record. */
		status = smbXsrv_open_update(op);
	}

	return status;
}
2940 NTSTATUS smbd_smb2_request_dispatch(struct smbd_smb2_request *req)
2942 struct smbXsrv_connection *xconn = req->xconn;
2943 const struct smbd_smb2_dispatch_table *call = NULL;
2944 const struct iovec *intf_v = SMBD_SMB2_IN_TF_IOV(req);
2945 const uint8_t *inhdr;
2950 NTSTATUS session_status;
2951 uint32_t allowed_flags;
2952 NTSTATUS return_value;
2953 struct smbXsrv_session *x = NULL;
2954 bool signing_required = false;
2955 bool encryption_desired = false;
2956 bool encryption_required = false;
2958 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2960 DO_PROFILE_INC(request);
2962 SMB_ASSERT(!req->request_counters_updated);
2964 /* TODO: verify more things */
2966 flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2967 opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2968 mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2969 DEBUG(0,("smbd_smb2_request_dispatch: opcode[%s] mid = %llu\n",
2970 smb2_opcode_name(opcode),
2971 (unsigned long long)mid));
2973 if (xconn->protocol >= PROTOCOL_SMB2_02) {
2975 * once the protocol is negotiated
2976 * SMB2_OP_NEGPROT is not allowed anymore
2978 if (opcode == SMB2_OP_NEGPROT) {
2979 /* drop the connection */
2980 return NT_STATUS_INVALID_PARAMETER;
2984 * if the protocol is not negotiated yet
2985 * only SMB2_OP_NEGPROT is allowed.
2987 if (opcode != SMB2_OP_NEGPROT) {
2988 /* drop the connection */
2989 return NT_STATUS_INVALID_PARAMETER;
2994 * Check if the client provided a valid session id.
2996 * As some command don't require a valid session id
2997 * we defer the check of the session_status
2999 session_status = smbd_smb2_request_check_session(req);
3002 signing_required = x->global->signing_flags & SMBXSRV_SIGNING_REQUIRED;
3003 encryption_desired = x->global->encryption_flags & SMBXSRV_ENCRYPTION_DESIRED;
3004 encryption_required = x->global->encryption_flags & SMBXSRV_ENCRYPTION_REQUIRED;
3007 req->async_internal = false;
3008 req->do_signing = false;
3009 if (opcode != SMB2_OP_SESSSETUP) {
3010 req->do_encryption = encryption_desired;
3012 req->do_encryption = false;
3014 req->was_encrypted = false;
3015 if (intf_v->iov_len == SMB2_TF_HDR_SIZE) {
3016 const uint8_t *intf = SMBD_SMB2_IN_TF_PTR(req);
3017 uint64_t tf_session_id = BVAL(intf, SMB2_TF_SESSION_ID);
3019 if (x != NULL && x->global->session_wire_id != tf_session_id) {
3020 DEBUG(0,("smbd_smb2_request_dispatch: invalid session_id"
3021 "in SMB2_HDR[%llu], SMB2_TF[%llu]\n",
3022 (unsigned long long)x->global->session_wire_id,
3023 (unsigned long long)tf_session_id));
3025 * TODO: windows allows this...
3026 * should we drop the connection?
3028 * For now we just return ACCESS_DENIED
3029 * (Windows clients never trigger this)
3030 * and wait for an update of [MS-SMB2].
3032 return smbd_smb2_request_error(req,
3033 NT_STATUS_ACCESS_DENIED);
3036 req->was_encrypted = true;
3037 req->do_encryption = true;
3040 if (encryption_required && !req->was_encrypted) {
3041 req->do_encryption = true;
3042 return smbd_smb2_request_error(req,
3043 NT_STATUS_ACCESS_DENIED);
3046 call = smbd_smb2_call(opcode);
3048 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
3051 allowed_flags = SMB2_HDR_FLAG_CHAINED |
3052 SMB2_HDR_FLAG_SIGNED |
3054 if (xconn->protocol >= PROTOCOL_SMB3_11) {
3055 allowed_flags |= SMB2_HDR_FLAG_PRIORITY_MASK;
3057 if (opcode == SMB2_OP_NEGPROT) {
3058 if (lp_server_max_protocol() >= PROTOCOL_SMB3_11) {
3059 allowed_flags |= SMB2_HDR_FLAG_PRIORITY_MASK;
3062 if (opcode == SMB2_OP_CANCEL) {
3063 allowed_flags |= SMB2_HDR_FLAG_ASYNC;
3065 if (xconn->protocol >= PROTOCOL_SMB2_22) {
3066 allowed_flags |= SMB2_HDR_FLAG_REPLAY_OPERATION;
3068 if ((flags & ~allowed_flags) != 0) {
3069 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
3072 if (flags & SMB2_HDR_FLAG_CHAINED) {
3074 * This check is mostly for giving the correct error code
3075 * for compounded requests.
3077 if (!NT_STATUS_IS_OK(session_status)) {
3078 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
3081 req->compat_chain_fsp = NULL;
3084 if (req->was_encrypted) {
3085 signing_required = false;
3086 } else if (signing_required || (flags & SMB2_HDR_FLAG_SIGNED)) {
3087 struct smb2_signing_key *signing_key = NULL;
3091 * MS-SMB2: 3.3.5.2.4 Verifying the Signature.
3092 * If the SMB2 header of the SMB2 NEGOTIATE
3093 * request has the SMB2_FLAGS_SIGNED bit set in the
3094 * Flags field, the server MUST fail the request
3095 * with STATUS_INVALID_PARAMETER.
3097 * Microsoft test tool checks this.
3100 if ((opcode == SMB2_OP_NEGPROT) &&
3101 (flags & SMB2_HDR_FLAG_SIGNED)) {
3102 status = NT_STATUS_INVALID_PARAMETER;
3104 status = NT_STATUS_USER_SESSION_DELETED;
3106 return smbd_smb2_request_error(req, status);
3109 signing_key = smbd_smb2_signing_key(x, xconn);
3112 * If we have a signing key, we should
3115 if (smb2_signing_key_valid(signing_key)) {
3116 req->do_signing = true;
3119 status = smb2_signing_check_pdu(signing_key,
3121 SMBD_SMB2_IN_HDR_IOV(req),
3122 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
3123 if (!NT_STATUS_IS_OK(status)) {
3124 return smbd_smb2_request_error(req, status);
3128 * Now that we know the request was correctly signed
3129 * we have to sign the response too.
3131 req->do_signing = true;
3133 if (!NT_STATUS_IS_OK(session_status)) {
3134 return smbd_smb2_request_error(req, session_status);
3136 } else if (opcode == SMB2_OP_CANCEL) {
3137 /* Cancel requests are allowed to skip the signing */
3138 } else if (opcode == SMB2_OP_IOCTL) {
3140 * Some special IOCTL calls don't require
3141 * file, tcon nor session.
3143 * They typically don't do any real action
3144 * on behalf of the client.
3146 * They are mainly used to alter the behavior
3147 * of the connection for testing. So we can
3148 * run as root and skip all file, tcon and session
3151 static const struct smbd_smb2_dispatch_table _root_ioctl_call = {
3155 const uint8_t *body = SMBD_SMB2_IN_BODY_PTR(req);
3156 size_t body_size = SMBD_SMB2_IN_BODY_LEN(req);
3157 uint32_t in_ctl_code;
3160 if (needed > body_size) {
3161 return smbd_smb2_request_error(req,
3162 NT_STATUS_INVALID_PARAMETER);
3165 in_ctl_code = IVAL(body, 0x04);
3167 * Only add trusted IOCTL codes here!
3169 switch (in_ctl_code) {
3170 case FSCTL_SMBTORTURE_FORCE_UNACKED_TIMEOUT:
3171 call = &_root_ioctl_call;
3174 } else if (signing_required) {
3176 * If signing is required we try to sign
3177 * a possible error response
3179 req->do_signing = true;
3180 return smbd_smb2_request_error(req, NT_STATUS_ACCESS_DENIED);
3183 if (flags & SMB2_HDR_FLAG_CHAINED) {
3184 req->compound_related = true;
3187 if (call->need_session) {
3188 if (!NT_STATUS_IS_OK(session_status)) {
3189 return smbd_smb2_request_error(req, session_status);
3193 if (call->need_tcon) {
3194 SMB_ASSERT(call->need_session);
3197 * This call needs to be run as user.
3199 * smbd_smb2_request_check_tcon()
3200 * calls change_to_user() on success.
3201 * Which implies set_current_user_info()
3202 * and chdir_current_service().
3204 status = smbd_smb2_request_check_tcon(req);
3205 if (!NT_STATUS_IS_OK(status)) {
3206 return smbd_smb2_request_error(req, status);
3208 if (req->tcon->global->encryption_flags & SMBXSRV_ENCRYPTION_DESIRED) {
3209 encryption_desired = true;
3211 if (req->tcon->global->encryption_flags & SMBXSRV_ENCRYPTION_REQUIRED) {
3212 encryption_required = true;
3214 if (encryption_required && !req->was_encrypted) {
3215 req->do_encryption = true;
3216 return smbd_smb2_request_error(req,
3217 NT_STATUS_ACCESS_DENIED);
3218 } else if (encryption_desired) {
3219 req->do_encryption = true;
3221 } else if (call->need_session) {
3222 struct auth_session_info *session_info = NULL;
3225 * Unless we also have need_tcon (see above),
3226 * we still need to call set_current_user_info().
3229 session_info = req->session->global->auth_session_info;
3230 if (session_info == NULL) {
3231 return NT_STATUS_INVALID_HANDLE;
3234 set_current_user_info(session_info->unix_info->sanitized_username,
3235 session_info->unix_info->unix_name,
3236 session_info->info->domain_name);
3240 bool update_session_global = false;
3241 bool update_tcon_global = false;
3243 smb2srv_update_crypto_flags(req, opcode,
3244 &update_session_global,
3245 &update_tcon_global);
3247 if (update_session_global) {
3248 status = smbXsrv_session_update(x);
3249 if (!NT_STATUS_IS_OK(status)) {
3250 return smbd_smb2_request_error(req, status);
3253 if (update_tcon_global) {
3254 status = smbXsrv_tcon_update(req->tcon);
3255 if (!NT_STATUS_IS_OK(status)) {
3256 return smbd_smb2_request_error(req, status);
3261 if (call->fileid_ofs != 0) {
3262 size_t needed = call->fileid_ofs + 16;
3263 const uint8_t *body = SMBD_SMB2_IN_BODY_PTR(req);
3264 size_t body_size = SMBD_SMB2_IN_BODY_LEN(req);
3265 uint64_t file_id_persistent;
3266 uint64_t file_id_volatile;
3267 struct files_struct *fsp;
3269 SMB_ASSERT(call->need_tcon);
3271 if (needed > body_size) {
3272 return smbd_smb2_request_error(req,
3273 NT_STATUS_INVALID_PARAMETER);
3276 file_id_persistent = BVAL(body, call->fileid_ofs + 0);
3277 file_id_volatile = BVAL(body, call->fileid_ofs + 8);
3279 fsp = file_fsp_smb2(req, file_id_persistent, file_id_volatile);
3281 if (!call->allow_invalid_fileid) {
3282 return smbd_smb2_request_error(req,
3283 NT_STATUS_FILE_CLOSED);
3286 if (file_id_persistent != UINT64_MAX) {
3287 return smbd_smb2_request_error(req,
3288 NT_STATUS_FILE_CLOSED);
3290 if (file_id_volatile != UINT64_MAX) {
3291 return smbd_smb2_request_error(req,
3292 NT_STATUS_FILE_CLOSED);
3297 status = smbd_smb2_request_dispatch_update_counts(req, call->modify);
3298 if (!NT_STATUS_IS_OK(status)) {
3299 return smbd_smb2_request_error(req, status);
3302 if (call->as_root) {
3303 SMB_ASSERT(call->fileid_ofs == 0);
3304 /* This call needs to be run as root */
3305 change_to_root_user();
3307 SMB_ASSERT(call->need_tcon);
3310 #define _INBYTES(_r) \
3311 iov_buflen(SMBD_SMB2_IN_HDR_IOV(_r), SMBD_SMB2_NUM_IOV_PER_REQ-1)
3314 case SMB2_OP_NEGPROT:
3315 SMBPROFILE_IOBYTES_ASYNC_START(smb2_negprot, profile_p,
3316 req->profile, _INBYTES(req));
3317 return_value = smbd_smb2_request_process_negprot(req);
3320 case SMB2_OP_SESSSETUP:
3321 SMBPROFILE_IOBYTES_ASYNC_START(smb2_sesssetup, profile_p,
3322 req->profile, _INBYTES(req));
3323 return_value = smbd_smb2_request_process_sesssetup(req);
3326 case SMB2_OP_LOGOFF:
3327 SMBPROFILE_IOBYTES_ASYNC_START(smb2_logoff, profile_p,
3328 req->profile, _INBYTES(req));
3329 return_value = smbd_smb2_request_process_logoff(req);
3333 SMBPROFILE_IOBYTES_ASYNC_START(smb2_tcon, profile_p,
3334 req->profile, _INBYTES(req));
3335 return_value = smbd_smb2_request_process_tcon(req);
3339 SMBPROFILE_IOBYTES_ASYNC_START(smb2_tdis, profile_p,
3340 req->profile, _INBYTES(req));
3341 return_value = smbd_smb2_request_process_tdis(req);
3344 case SMB2_OP_CREATE:
3345 if (req->subreq == NULL) {
3346 SMBPROFILE_IOBYTES_ASYNC_START(smb2_create, profile_p,
3347 req->profile, _INBYTES(req));
3349 SMBPROFILE_IOBYTES_ASYNC_SET_BUSY(req->profile);
3351 return_value = smbd_smb2_request_process_create(req);
3355 SMBPROFILE_IOBYTES_ASYNC_START(smb2_close, profile_p,
3356 req->profile, _INBYTES(req));
3357 return_value = smbd_smb2_request_process_close(req);
3361 SMBPROFILE_IOBYTES_ASYNC_START(smb2_flush, profile_p,
3362 req->profile, _INBYTES(req));
3363 return_value = smbd_smb2_request_process_flush(req);
3367 SMBPROFILE_IOBYTES_ASYNC_START(smb2_read, profile_p,
3368 req->profile, _INBYTES(req));
3369 return_value = smbd_smb2_request_process_read(req);
3373 SMBPROFILE_IOBYTES_ASYNC_START(smb2_write, profile_p,
3374 req->profile, _INBYTES(req));
3375 return_value = smbd_smb2_request_process_write(req);
3379 SMBPROFILE_IOBYTES_ASYNC_START(smb2_lock, profile_p,
3380 req->profile, _INBYTES(req));
3381 return_value = smbd_smb2_request_process_lock(req);
3385 SMBPROFILE_IOBYTES_ASYNC_START(smb2_ioctl, profile_p,
3386 req->profile, _INBYTES(req));
3387 return_value = smbd_smb2_request_process_ioctl(req);
3390 case SMB2_OP_CANCEL:
3391 SMBPROFILE_IOBYTES_ASYNC_START(smb2_cancel, profile_p,
3392 req->profile, _INBYTES(req));
3393 return_value = smbd_smb2_request_process_cancel(req);
3394 SMBPROFILE_IOBYTES_ASYNC_END(req->profile, 0);
3397 * We don't need the request anymore cancel requests never
3400 * smbd_smb2_request_process_cancel() already called
3401 * DLIST_REMOVE(xconn->smb2.requests, req);
3407 case SMB2_OP_KEEPALIVE:
3408 SMBPROFILE_IOBYTES_ASYNC_START(smb2_keepalive, profile_p,
3409 req->profile, _INBYTES(req));
3410 return_value = smbd_smb2_request_process_keepalive(req);
3413 case SMB2_OP_QUERY_DIRECTORY:
3414 SMBPROFILE_IOBYTES_ASYNC_START(smb2_find, profile_p,
3415 req->profile, _INBYTES(req));
3416 return_value = smbd_smb2_request_process_query_directory(req);
3419 case SMB2_OP_NOTIFY:
3420 SMBPROFILE_IOBYTES_ASYNC_START(smb2_notify, profile_p,
3421 req->profile, _INBYTES(req));
3422 return_value = smbd_smb2_request_process_notify(req);
3425 case SMB2_OP_GETINFO:
3426 SMBPROFILE_IOBYTES_ASYNC_START(smb2_getinfo, profile_p,
3427 req->profile, _INBYTES(req));
3428 return_value = smbd_smb2_request_process_getinfo(req);
3431 case SMB2_OP_SETINFO:
3432 SMBPROFILE_IOBYTES_ASYNC_START(smb2_setinfo, profile_p,
3433 req->profile, _INBYTES(req));
3434 return_value = smbd_smb2_request_process_setinfo(req);
3438 SMBPROFILE_IOBYTES_ASYNC_START(smb2_break, profile_p,
3439 req->profile, _INBYTES(req));
3440 return_value = smbd_smb2_request_process_break(req);
3444 return_value = smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
3447 return return_value;
/*
 * Counterpart of smbd_smb2_request_dispatch_update_counts(): when the
 * reply for a request goes out, release the per-open outstanding
 * request count that was taken at dispatch time.
 */
static void smbd_smb2_request_reply_update_counts(struct smbd_smb2_request *req)
{
	struct smbXsrv_connection *xconn = req->xconn;
	const uint8_t *inhdr;
	uint16_t channel_sequence;
	struct smbXsrv_open *op;

	if (!req->request_counters_updated) {
		return;
	}

	/* Only release once, even if this is called again. */
	req->request_counters_updated = false;

	if (xconn->protocol < PROTOCOL_SMB2_22) {
		return;
	}

	if (req->compat_chain_fsp == NULL) {
		return;
	}

	op = req->compat_chain_fsp->op;
	if (op == NULL) {
		return;
	}

	inhdr = SMBD_SMB2_IN_HDR_PTR(req);
	channel_sequence = SVAL(inhdr, SMB2_HDR_CHANNEL_SEQUENCE);

	if ((op->global->channel_sequence == channel_sequence) &&
	    (op->global->channel_generation == req->channel_generation)) {
		/* Still on the csn we counted against. */
		SMB_ASSERT(op->request_count > 0);
		op->request_count -= 1;
	} else {
		/* csn moved on meanwhile; we were rotated into pre_. */
		SMB_ASSERT(op->pre_request_count > 0);
		op->pre_request_count -= 1;
	}
}
3489 static NTSTATUS smbd_smb2_request_reply(struct smbd_smb2_request *req)
3491 struct smbXsrv_connection *xconn = req->xconn;
3493 struct iovec *firsttf = SMBD_SMB2_IDX_TF_IOV(req,out,first_idx);
3494 struct iovec *outhdr = SMBD_SMB2_OUT_HDR_IOV(req);
3495 struct iovec *outdyn = SMBD_SMB2_OUT_DYN_IOV(req);
3500 TALLOC_FREE(req->async_te);
3502 /* MS-SMB2: 3.3.4.1 Sending Any Outgoing Message */
3503 smbd_smb2_request_reply_update_counts(req);
3505 if (req->do_encryption &&
3506 (firsttf->iov_len == 0) &&
3507 (req->first_key.length == 0) &&
3508 (req->session != NULL) &&
3509 smb2_signing_key_valid(req->session->global->encryption_key))
3511 struct smb2_signing_key *encryption_key =
3512 req->session->global->encryption_key;
3514 uint64_t session_id = req->session->global->session_wire_id;
3515 uint64_t nonce_high;
3518 status = smb2_get_new_nonce(req->session,
3521 if (!NT_STATUS_IS_OK(status)) {
3526 * We need to place the SMB2_TRANSFORM header before the
3531 * we need to remember the encryption key
3532 * and defer the signing/encryption until
3533 * we are sure that we do not change
3536 req->first_key = data_blob_dup_talloc(req,
3537 encryption_key->blob);
3538 if (req->first_key.data == NULL) {
3539 return NT_STATUS_NO_MEMORY;
3542 tf = talloc_zero_array(req, uint8_t,
3545 return NT_STATUS_NO_MEMORY;
3548 SIVAL(tf, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
3549 SBVAL(tf, SMB2_TF_NONCE+0, nonce_low);
3550 SBVAL(tf, SMB2_TF_NONCE+8, nonce_high);
3551 SBVAL(tf, SMB2_TF_SESSION_ID, session_id);
3553 firsttf->iov_base = (void *)tf;
3554 firsttf->iov_len = SMB2_TF_HDR_SIZE;
3557 if ((req->current_idx > SMBD_SMB2_NUM_IOV_PER_REQ) &&
3558 (req->last_key.length > 0) &&
3559 (firsttf->iov_len == 0))
3561 int last_idx = req->current_idx - SMBD_SMB2_NUM_IOV_PER_REQ;
3562 struct iovec *lasthdr = SMBD_SMB2_IDX_HDR_IOV(req,out,last_idx);
3563 struct smb2_signing_key key = {
3564 .blob = req->last_key,
3568 * As we are sure the header of the last request in the
3569 * compound chain will not change, we can to sign here
3570 * with the last signing key we remembered.
3572 status = smb2_signing_sign_pdu(&key,
3575 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
3576 smb2_signing_key_destructor(&key);
3577 if (!NT_STATUS_IS_OK(status)) {
3581 if (req->last_key.length > 0) {
3582 data_blob_clear_free(&req->last_key);
3585 SMBPROFILE_IOBYTES_ASYNC_END(req->profile,
3586 iov_buflen(outhdr, SMBD_SMB2_NUM_IOV_PER_REQ-1));
3588 req->current_idx += SMBD_SMB2_NUM_IOV_PER_REQ;
3590 if (req->current_idx < req->out.vector_count) {
3592 * We must process the remaining compound
3593 * SMB2 requests before any new incoming SMB2
3594 * requests. This is because incoming SMB2
3595 * requests may include a cancel for a
3596 * compound request we haven't processed
3599 struct tevent_immediate *im = tevent_create_immediate(req);
3601 return NT_STATUS_NO_MEMORY;
3604 if (req->do_signing && firsttf->iov_len == 0) {
3605 struct smbXsrv_session *x = req->session;
3606 struct smb2_signing_key *signing_key =
3607 smbd_smb2_signing_key(x, xconn);
3610 * we need to remember the signing key
3611 * and defer the signing until
3612 * we are sure that we do not change
3615 req->last_key = data_blob_dup_talloc(req,
3617 if (req->last_key.data == NULL) {
3618 return NT_STATUS_NO_MEMORY;
3623 * smbd_smb2_request_dispatch() will redo the impersonation.
3624 * So we use req->xconn->client->raw_ev_ctx instead
3625 * of req->ev_ctx here.
3627 tevent_schedule_immediate(im,
3628 req->xconn->client->raw_ev_ctx,
3629 smbd_smb2_request_dispatch_immediate,
3631 return NT_STATUS_OK;
3634 if (req->compound_related) {
3635 req->compound_related = false;
3638 ok = smb2_setup_nbt_length(req->out.vector, req->out.vector_count);
3640 return NT_STATUS_INVALID_PARAMETER_MIX;
3643 /* Set credit for these operations (zero credits if this
3644 is a final reply for an async operation). */
3645 smb2_calculate_credits(req, req);
3648 * now check if we need to sign the current response
3650 if (firsttf->iov_len == SMB2_TF_HDR_SIZE) {
3651 struct smb2_signing_key key = {
3652 .blob = req->first_key,
3654 status = smb2_signing_encrypt_pdu(&key,
3655 xconn->smb2.server.cipher,
3657 req->out.vector_count - first_idx);
3658 smb2_signing_key_destructor(&key);
3659 if (!NT_STATUS_IS_OK(status)) {
3662 } else if (req->do_signing) {
3663 struct smbXsrv_session *x = req->session;
3664 struct smb2_signing_key *signing_key =
3665 smbd_smb2_signing_key(x, xconn);
3667 status = smb2_signing_sign_pdu(signing_key,
3670 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
3671 if (!NT_STATUS_IS_OK(status)) {
3675 if (req->first_key.length > 0) {
3676 data_blob_clear_free(&req->first_key);
3679 if (req->preauth != NULL) {
3680 gnutls_hash_hd_t hash_hnd = NULL;
3684 rc = gnutls_hash_init(&hash_hnd, GNUTLS_DIG_SHA512);
3686 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3688 rc = gnutls_hash(hash_hnd,
3689 req->preauth->sha512_value,
3690 sizeof(req->preauth->sha512_value));
3692 gnutls_hash_deinit(hash_hnd, NULL);
3693 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3695 for (i = 1; i < req->in.vector_count; i++) {
3696 rc = gnutls_hash(hash_hnd,
3697 req->in.vector[i].iov_base,
3698 req->in.vector[i].iov_len);
3700 gnutls_hash_deinit(hash_hnd, NULL);
3701 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3705 gnutls_hash_deinit(hash_hnd, NULL);
3706 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3708 gnutls_hash_output(hash_hnd, req->preauth->sha512_value);
3710 rc = gnutls_hash(hash_hnd,
3711 req->preauth->sha512_value,
3712 sizeof(req->preauth->sha512_value));
3714 gnutls_hash_deinit(hash_hnd, NULL);
3715 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3717 for (i = 1; i < req->out.vector_count; i++) {
3718 rc = gnutls_hash(hash_hnd,
3719 req->out.vector[i].iov_base,
3720 req->out.vector[i].iov_len);
3722 gnutls_hash_deinit(hash_hnd, NULL);
3723 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3727 gnutls_hash_deinit(hash_hnd, req->preauth->sha512_value);
3729 req->preauth = NULL;
3732 /* I am a sick, sick man... :-). Sendfile hack ... JRA. */
3733 if (req->out.vector_count < (2*SMBD_SMB2_NUM_IOV_PER_REQ) &&
3734 outdyn->iov_base == NULL && outdyn->iov_len != 0) {
3735 /* Dynamic part is NULL. Chop it off,
3736 We're going to send it via sendfile. */
3737 req->out.vector_count -= 1;
3741 * We're done with this request -
3742 * move it off the "being processed" queue.
3744 DLIST_REMOVE(xconn->smb2.requests, req);
3746 req->queue_entry.mem_ctx = req;
3747 req->queue_entry.vector = req->out.vector;
3748 req->queue_entry.count = req->out.vector_count;
3749 req->queue_entry.xconn = xconn;
3750 DLIST_ADD_END(xconn->smb2.send_queue, &req->queue_entry);
3751 xconn->smb2.send_queue_len++;
3752 DBG_ERR("queue e[%p]\n", &req->queue_entry);
3753 status = smbd_smb2_flush_send_queue(xconn);
3754 if (!NT_STATUS_IS_OK(status)) {
3758 return NT_STATUS_OK;
3761 static NTSTATUS smbd_smb2_request_next_incoming(struct smbXsrv_connection *xconn);
/*
 * Immediate-event trampoline scheduled by smbd_smb2_request_reply():
 * continues dispatching the next element of a compound request before
 * any new incoming PDU is read.
 */
void smbd_smb2_request_dispatch_immediate(struct tevent_context *ctx,
					  struct tevent_immediate *im,
					  void *private_data)
{
	struct smbd_smb2_request *req = talloc_get_type_abort(private_data,
					struct smbd_smb2_request);
	struct smbXsrv_connection *xconn = req->xconn;
	NTSTATUS status;

	/* The immediate has fired; it is no longer needed. */
	TALLOC_FREE(im);

	if (DEBUGLEVEL >= 10) {
		DEBUG(10,("smbd_smb2_request_dispatch_immediate: idx[%d] of %d vectors\n",
			req->current_idx, req->in.vector_count));
		print_req_vectors(req);
	}

	status = smbd_smb2_request_dispatch(req);
	if (!NT_STATUS_IS_OK(status)) {
		/* A non-OK dispatch status drops the whole connection. */
		smbd_server_connection_terminate(xconn, nt_errstr(status));
		return;
	}

	status = smbd_smb2_request_next_incoming(xconn);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_connection_terminate(xconn, nt_errstr(status));
		return;
	}
}
3793 NTSTATUS smbd_smb2_request_done_ex(struct smbd_smb2_request *req,
3795 DATA_BLOB body, DATA_BLOB *dyn,
3796 const char *location)
3799 struct iovec *outbody_v;
3800 struct iovec *outdyn_v;
3801 uint32_t next_command_ofs;
3804 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
3805 mid = BVAL(outhdr, SMB2_HDR_MESSAGE_ID);
3807 DBG_ERR("mid [%"PRIu64"] idx[%d] status[%s] "
3808 "body[%u] dyn[%s:%u] at %s\n",
3812 (unsigned int)body.length,
3814 (unsigned int)(dyn ? dyn->length : 0),
3816 DBG_ERR("queue e[%p]\n", &req->queue_entry);
3818 if (body.length < 2) {
3819 return smbd_smb2_request_error(req, NT_STATUS_INTERNAL_ERROR);
3822 if ((body.length % 2) != 0) {
3823 return smbd_smb2_request_error(req, NT_STATUS_INTERNAL_ERROR);
3826 outbody_v = SMBD_SMB2_OUT_BODY_IOV(req);
3827 outdyn_v = SMBD_SMB2_OUT_DYN_IOV(req);
3829 next_command_ofs = IVAL(outhdr, SMB2_HDR_NEXT_COMMAND);
3830 SIVAL(outhdr, SMB2_HDR_STATUS, NT_STATUS_V(status));
3832 outbody_v->iov_base = (void *)body.data;
3833 outbody_v->iov_len = body.length;
3836 outdyn_v->iov_base = (void *)dyn->data;
3837 outdyn_v->iov_len = dyn->length;
3839 outdyn_v->iov_base = NULL;
3840 outdyn_v->iov_len = 0;
3844 * See if we need to recalculate the offset to the next response
3846 * Note that all responses may require padding (including the very last
3849 if (req->out.vector_count >= (2 * SMBD_SMB2_NUM_IOV_PER_REQ)) {
3850 next_command_ofs = SMB2_HDR_BODY;
3851 next_command_ofs += SMBD_SMB2_OUT_BODY_LEN(req);
3852 next_command_ofs += SMBD_SMB2_OUT_DYN_LEN(req);
3855 if ((next_command_ofs % 8) != 0) {
3856 size_t pad_size = 8 - (next_command_ofs % 8);
3857 if (SMBD_SMB2_OUT_DYN_LEN(req) == 0) {
3859 * if the dyn buffer is empty
3860 * we can use it to add padding
3864 pad = talloc_zero_array(req,
3867 return smbd_smb2_request_error(req,
3868 NT_STATUS_NO_MEMORY);
3871 outdyn_v->iov_base = (void *)pad;
3872 outdyn_v->iov_len = pad_size;
3875 * For now we copy the dynamic buffer
3876 * and add the padding to the new buffer
3883 old_size = SMBD_SMB2_OUT_DYN_LEN(req);
3884 old_dyn = SMBD_SMB2_OUT_DYN_PTR(req);
3886 new_size = old_size + pad_size;
3887 new_dyn = talloc_zero_array(req,
3889 if (new_dyn == NULL) {
3890 return smbd_smb2_request_error(req,
3891 NT_STATUS_NO_MEMORY);
3894 memcpy(new_dyn, old_dyn, old_size);
3895 memset(new_dyn + old_size, 0, pad_size);
3897 outdyn_v->iov_base = (void *)new_dyn;
3898 outdyn_v->iov_len = new_size;
3900 next_command_ofs += pad_size;
3903 if ((req->current_idx + SMBD_SMB2_NUM_IOV_PER_REQ) >= req->out.vector_count) {
3904 SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, 0);
3906 SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, next_command_ofs);
3908 return smbd_smb2_request_reply(req);
/*
 * Build and send an SMB2 ERROR response for the current request.
 *
 * Logs the failure, drains any unread recvfile bytes from the socket
 * so the byte stream stays in sync, then constructs the fixed 9-byte
 * SMB2 ERROR response body (plus the optional error-info blob) and
 * hands off to smbd_smb2_request_done_ex().
 *
 * NOTE(review): several lines of this routine are elided in this view;
 * the comments below describe only the visible code.
 */
NTSTATUS smbd_smb2_request_error_ex(struct smbd_smb2_request *req,
				    const char *location)
{
	struct smbXsrv_connection *xconn = req->xconn;
	uint8_t *outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
	size_t unread_bytes = smbd_smb2_unread_bytes(req);

	DBG_ERR("smbd_smb2_request_error_ex: idx[%d] status[%s] |%s| "
		"at %s\n", req->current_idx, nt_errstr(status),
		info ? " +info" : "", location);

	/* Recvfile error. Drain incoming socket. */
	ret = drain_socket(xconn->transport.sock, unread_bytes);
	if (ret != unread_bytes) {
		/* Draining failed: map errno to an NTSTATUS (fallback below). */
		error = NT_STATUS_IO_DEVICE_ERROR;
		error = map_nt_error_from_unix_common(errno);
	DEBUG(2, ("Failed to drain %u bytes from SMB2 socket: "
	          "ret[%u] errno[%d] => %s\n",
	          (unsigned)unread_bytes,
	          (unsigned)ret, errno, nt_errstr(error)));

	/* The error body starts right after the fixed SMB2 header. */
	body.data = outhdr + SMB2_HDR_BODY;
	/* StructureSize of an SMB2 ERROR response body is always 9. */
	SSVAL(body.data, 0, 9);
	SIVAL(body.data, 0x04, info->length);

	/* Allocated size of req->out.vector[i].iov_base
	 * *MUST BE* OUTVEC_ALLOC_SIZE. So we have room for
	 * 1 byte without having to do an alloc.
	 */
	info->data = ((uint8_t *)outhdr) +
		OUTVEC_ALLOC_SIZE - 1;
	SCVAL(info->data, 0, 0);

	/*
	 * Note: Even if there is an error, continue to process the request.
	 */
	return smbd_smb2_request_done_ex(req, status, body, info, __location__);
/*
 * In-flight state for an unsolicited SMB2 BREAK notification
 * (oplock or lease break) queued on a connection's send queue.
 */
struct smbd_smb2_break_state {
	struct tevent_req *req;			/* owning async request */
	struct smbd_smb2_send_queue queue_entry; /* linkage into xconn->smb2.send_queue */
	uint8_t nbt_hdr[NBT_HDR_SIZE];		/* NBT length prefix */
	uint8_t hdr[SMB2_HDR_BODY];		/* fixed 64-byte SMB2 header */
	struct iovec vector[1+SMBD_SMB2_NUM_IOV_PER_REQ]; /* nbt hdr + per-request iovecs */
/*
 * Queue an unsolicited SMB2 BREAK PDU (oplock or lease break body as
 * supplied by the caller) on @xconn and flush the send queue.
 *
 * The SMB2 header uses MessageId UINT64_MAX, SMB2_HDR_FLAG_REDIRECT
 * and no signing, as required for server-initiated break
 * notifications.  TCP-ack tracking is armed so the tevent_req only
 * completes once the client has acked the bytes, or the
 * retransmission timeout (rto * 6) fires.
 */
static struct tevent_req *smbd_smb2_break_send(TALLOC_CTX *mem_ctx,
					       struct tevent_context *ev,
					       struct smbXsrv_connection *xconn,
					       uint64_t session_id,
					       const uint8_t *body,
	struct tevent_req *req = NULL;
	struct smbd_smb2_break_state *state = NULL;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_break_state);
	/* Callbacks must not run re-entrantly from this function. */
	tevent_req_defer_callback(req, ev);

	/* Build the fixed SMB2 header for an unsolicited break. */
	SIVAL(state->hdr, 0,				SMB2_MAGIC);
	SSVAL(state->hdr, SMB2_HDR_LENGTH,		SMB2_HDR_BODY);
	SSVAL(state->hdr, SMB2_HDR_EPOCH,		0);
	SIVAL(state->hdr, SMB2_HDR_STATUS,		0);
	SSVAL(state->hdr, SMB2_HDR_OPCODE,		SMB2_OP_BREAK);
	SSVAL(state->hdr, SMB2_HDR_CREDIT,		0);
	SIVAL(state->hdr, SMB2_HDR_FLAGS,		SMB2_HDR_FLAG_REDIRECT);
	SIVAL(state->hdr, SMB2_HDR_NEXT_COMMAND,	0);
	SBVAL(state->hdr, SMB2_HDR_MESSAGE_ID,		UINT64_MAX);
	SIVAL(state->hdr, SMB2_HDR_PID,			0);
	SIVAL(state->hdr, SMB2_HDR_TID,			0);
	SBVAL(state->hdr, SMB2_HDR_SESSION_ID,		session_id);
	/* Break PDUs are never signed. */
	memset(state->hdr+SMB2_HDR_SIGNATURE, 0, 16);

	state->vector[0] = (struct iovec) {
		.iov_base = state->nbt_hdr,
		.iov_len  = sizeof(state->nbt_hdr)

	state->vector[1+SMBD_SMB2_TF_IOV_OFS] = (struct iovec) {

	state->vector[1+SMBD_SMB2_HDR_IOV_OFS] = (struct iovec) {
		.iov_base = state->hdr,
		.iov_len  = sizeof(state->hdr)

	state->vector[1+SMBD_SMB2_BODY_IOV_OFS] = (struct iovec) {
		.iov_base = discard_const_p(uint8_t, body),
		.iov_len  = body_len,

	/*
	 * state->vector[1+SMBD_SMB2_DYN_IOV_OFS] is NULL by talloc_zero above
	 */

	ok = smb2_setup_nbt_length(state->vector,
				   1 + SMBD_SMB2_NUM_IOV_PER_REQ);
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
		return tevent_req_post(req, ev);

	/*
	 * We require TCP acks for this PDU to the client!
	 * We want 5 retransmissions and timeout when the
	 * retransmission timeout (rto) passed 6 times.
	 *
	 * required_acked_bytes gets a dummy value of
	 * UINT64_MAX, as long it's in xconn->smb2.send_queue,
	 * it'll get the real value when it's moved to
	 *
	 * state->queue_entry.ack.req gets completed with
	 * 1. tevent_req_done(), when all bytes are acked.
	 * 2a. tevent_req_nterror(NT_STATUS_IO_TIMEOUT), when
	 *     the timeout expired before all bytes were acked.
	 * 2b. tevent_req_nterror(transport_error), when the
	 *     connection got a disconnect from the kernel.
	 */
	state->queue_entry.ack.timeout =
		timeval_current_ofs_usec(xconn->ack.rto_usecs * 6);
	state->queue_entry.ack.required_acked_bytes = UINT64_MAX;
	state->queue_entry.ack.req = req;
	state->queue_entry.mem_ctx = state;
	state->queue_entry.vector = state->vector;
	state->queue_entry.count = ARRAY_SIZE(state->vector);
	state->queue_entry.xconn = xconn;
	DLIST_ADD_END(xconn->smb2.send_queue, &state->queue_entry);
	xconn->smb2.send_queue_len++;

	/* NOTE(review): debug leftover? This logs at level 0 for every
	 * queued break — consider removing or demoting to DBG_DEBUG. */
	DBG_ERR("queue e[%p]\n", &state->queue_entry);
	status = smbd_smb2_flush_send_queue(xconn);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
/* Collect the NTSTATUS result of smbd_smb2_break_send(). */
static NTSTATUS smbd_smb2_break_recv(struct tevent_req *req)
	return tevent_req_simple_recv_ntstatus(req);
/*
 * One pending oplock/lease break notification for a client.  Kept on
 * client->pending_breaks until the break PDU was acked by the client,
 * or until no usable channel remains.
 */
struct smbXsrv_pending_break {
	struct smbXsrv_pending_break *prev, *next;	/* DLIST linkage */
	struct smbXsrv_client *client;
	bool disable_oplock_break_retries;	/* cached lp_smb2_disable_oplock_break_retry() */
	uint64_t session_id;		/* 0 => lease break (not session bound) */
	uint64_t last_channel_id;	/* last channel a break was attempted on */
	uint8_t oplock[0x18];		/* SMB2 OPLOCK_BREAK notification body */
	uint8_t lease[0x2c];		/* SMB2 LEASE_BREAK notification body */
4105 static void smbXsrv_pending_break_done(struct tevent_req *subreq);
/*
 * Allocate a pending-break tracker as a talloc child of @client.
 * session_id == 0 marks a lease break (not bound to one session).
 * Returns NULL on allocation failure.
 */
static struct smbXsrv_pending_break *smbXsrv_pending_break_create(
		struct smbXsrv_client *client,
		uint64_t session_id)
{
	struct smbXsrv_pending_break *pb = NULL;

	pb = talloc_zero(client, struct smbXsrv_pending_break);
	pb->client = client;
	pb->session_id = session_id;
	/* Snapshot the retry policy so it stays stable for this break. */
	pb->disable_oplock_break_retries = lp_smb2_disable_oplock_break_retry();
4124 static NTSTATUS smbXsrv_pending_break_submit(struct smbXsrv_pending_break *pb);
/*
 * Register @pb on the client's pending-break list, notify the
 * pending-breaks machinery (which arms ack checking on the client's
 * connections) and submit the first break PDU.
 */
static NTSTATUS smbXsrv_pending_break_schedule(struct smbXsrv_pending_break *pb)
{
	struct smbXsrv_client *client = pb->client;

	DLIST_ADD_END(client->pending_breaks, pb);
	status = smbXsrv_client_pending_breaks_updated(client);
	if (!NT_STATUS_IS_OK(status)) {

	status = smbXsrv_pending_break_submit(pb);
	if (!NT_STATUS_IS_OK(status)) {

	return NT_STATUS_OK;
/*
 * Pick a channel and send (or resend) the break PDU for @pb.
 *
 * Channel selection: walk all healthy connections of the client.  For
 * oplock breaks (session bound) only channels belonging to the session
 * are considered; for retries we move to the next channel_id above
 * pb->last_channel_id.  Returns NT_STATUS_ABANDONED when the break
 * should be given up (session gone, retries disabled, or no channel
 * left).
 */
static NTSTATUS smbXsrv_pending_break_submit(struct smbXsrv_pending_break *pb)
{
	struct smbXsrv_client *client = pb->client;
	struct smbXsrv_session *session = NULL;
	struct smbXsrv_connection *xconn = NULL;
	struct smbXsrv_connection *oplock_xconn = NULL;
	struct tevent_req *subreq = NULL;

	if (pb->session_id != 0) {
		/* Oplock break: the session must still be alive. */
		status = get_valid_smbXsrv_session(client,
		if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
			return NT_STATUS_ABANDONED;
		if (!NT_STATUS_IS_OK(status)) {

		if (pb->last_channel_id != 0) {
			/*
			 * This is what current Windows servers
			 * do, they don't retry on all available
			 * channels. They only use the last channel.
			 *
			 * But it doesn't match the specification in
			 * [MS-SMB2] "3.3.4.6 Object Store Indicates an
			 *
			 * Per default disable_oplock_break_retries is false
			 * and we behave like the specification.
			 */
			if (pb->disable_oplock_break_retries) {
				return NT_STATUS_ABANDONED;

	for (xconn = client->connections; xconn != NULL; xconn = xconn->next) {
		/* Skip already broken transports. */
		if (!NT_STATUS_IS_OK(xconn->transport.status)) {

		if (xconn->channel_id == 0) {
			/*
			 * non-multichannel case
			 */

		if (session != NULL) {
			struct smbXsrv_channel_global0 *c = NULL;

			/*
			 * Having a session means we're handling
			 * an oplock break and we only need to
			 * use channels available on the
			 */
			status = smbXsrv_session_find_channel(session, xconn, &c);
			if (!NT_STATUS_IS_OK(status)) {

			/*
			 * This is what current Windows servers
			 * do, they don't retry on all available
			 * channels. They only use the last channel.
			 *
			 * But it doesn't match the specification
			 * in [MS-SMB2] "3.3.4.6 Object Store Indicates an
			 *
			 * Per default disable_oplock_break_retries is false
			 * and we behave like the specification.
			 */
			if (pb->disable_oplock_break_retries) {
				oplock_xconn = xconn;

		/* Retry path: take the next channel above the last one used. */
		if (xconn->channel_id > pb->last_channel_id) {

	if (xconn == NULL) {
		xconn = oplock_xconn;

	if (xconn == NULL) {
		/*
		 * If there's no remaining connection available
		 * tell the caller to stop...
		 */
		return NT_STATUS_ABANDONED;

	pb->last_channel_id = xconn->channel_id;

	subreq = smbd_smb2_break_send(pb,
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	tevent_req_set_callback(subreq,
				smbXsrv_pending_break_done,

	return NT_STATUS_OK;
/*
 * Completion callback for one break-send attempt.  On failure we try
 * the next channel via smbXsrv_pending_break_submit(); if that is
 * abandoned the break is dropped, any other error disconnects the
 * whole client.  On success the break is removed from the pending
 * list and the pending-breaks state is re-evaluated.
 */
static void smbXsrv_pending_break_done(struct tevent_req *subreq)
{
	struct smbXsrv_pending_break *pb =
		tevent_req_callback_data(subreq,
		struct smbXsrv_pending_break);
	struct smbXsrv_client *client = pb->client;

	status = smbd_smb2_break_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		status = smbXsrv_pending_break_submit(pb);
		if (NT_STATUS_EQUAL(status, NT_STATUS_ABANDONED)) {
			/*
			 * If there's no remaining connection
			 * there's no need to send a break again.
			 */
		if (!NT_STATUS_IS_OK(status)) {
			smbd_server_disconnect_client(client, nt_errstr(status));

	DLIST_REMOVE(client->pending_breaks, pb);

	status = smbXsrv_client_pending_breaks_updated(client);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_disconnect_client(client, nt_errstr(status));
/*
 * Build an SMB2 OPLOCK_BREAK notification body ([MS-SMB2] 2.2.23.1)
 * for @op and schedule it for delivery to the client.
 */
NTSTATUS smbd_smb2_send_oplock_break(struct smbXsrv_client *client,
				     struct smbXsrv_open *op,
				     uint8_t oplock_level)
{
	struct smbXsrv_pending_break *pb = NULL;
	uint8_t *body = NULL;

	pb = smbXsrv_pending_break_create(client,
		return NT_STATUS_NO_MEMORY;
	pb->body_len = sizeof(pb->body.oplock);
	body = pb->body.oplock;

	SSVAL(body, 0x00, pb->body_len);	/* StructureSize */
	SCVAL(body, 0x02, oplock_level);
	SCVAL(body, 0x03, 0);		/* reserved */
	SIVAL(body, 0x04, 0);		/* reserved */
	SBVAL(body, 0x08, op->global->open_persistent_id);
	SBVAL(body, 0x10, op->global->open_volatile_id);

	return smbXsrv_pending_break_schedule(pb);
/*
 * Build an SMB2 LEASE_BREAK notification body ([MS-SMB2] 2.2.23.2)
 * and schedule it for delivery.  Lease breaks are not bound to a
 * session, hence session_id 0.
 */
NTSTATUS smbd_smb2_send_lease_break(struct smbXsrv_client *client,
				    uint32_t lease_flags,
				    struct smb2_lease_key *lease_key,
				    uint32_t current_lease_state,
				    uint32_t new_lease_state)
{
	struct smbXsrv_pending_break *pb = NULL;
	uint8_t *body = NULL;

	pb = smbXsrv_pending_break_create(client,
					  0); /* no session_id */
		return NT_STATUS_NO_MEMORY;
	pb->body_len = sizeof(pb->body.lease);
	body = pb->body.lease;

	SSVAL(body, 0x00, pb->body_len);	/* StructureSize */
	SSVAL(body, 0x02, new_epoch);
	SIVAL(body, 0x04, lease_flags);
	SBVAL(body, 0x08, lease_key->data[0]);
	SBVAL(body, 0x10, lease_key->data[1]);
	SIVAL(body, 0x18, current_lease_state);
	SIVAL(body, 0x1c, new_lease_state);
	SIVAL(body, 0x20, 0);		/* BreakReason, MUST be 0 */
	SIVAL(body, 0x24, 0);		/* AccessMaskHint, MUST be 0 */
	SIVAL(body, 0x28, 0);		/* ShareMaskHint, MUST be 0 */

	return smbXsrv_pending_break_schedule(pb);
/*
 * Decide whether the partially-read PDU in @state is a plain
 * (unencrypted, unsigned, unchained) SMB2 WRITE to a regular file,
 * i.e. eligible for the recvfile fast path where the payload is
 * spliced straight from the socket.  Any doubt means "no", and the
 * normal read path takes over.
 */
static bool is_smb2_recvfile_write(struct smbd_smb2_request_read_state *state)
	uint64_t file_id_persistent;
	uint64_t file_id_volatile;
	struct smbXsrv_open *op = NULL;
	struct files_struct *fsp = NULL;
	const uint8_t *body = NULL;

	/*
	 * This is only called with a pktbuf
	 * of at least SMBD_SMB2_SHORT_RECEIVEFILE_WRITE_LEN
	 */

	if (IVAL(state->pktbuf, 0) == SMB2_TF_MAGIC) {
		/* Transform header. Cannot recvfile. */
	if (IVAL(state->pktbuf, 0) != SMB2_MAGIC) {
		/* Not SMB2. Normal error path will cope. */
	if (SVAL(state->pktbuf, 4) != SMB2_HDR_BODY) {
		/* Not SMB2. Normal error path will cope. */
	if (SVAL(state->pktbuf, SMB2_HDR_OPCODE) != SMB2_OP_WRITE) {
		/* Needs to be a WRITE. */
	if (IVAL(state->pktbuf, SMB2_HDR_NEXT_COMMAND) != 0) {
		/* Chained. Cannot recvfile. */
	flags = IVAL(state->pktbuf, SMB2_HDR_FLAGS);
	if (flags & SMB2_HDR_FLAG_CHAINED) {
		/* Chained. Cannot recvfile. */
	if (flags & SMB2_HDR_FLAG_SIGNED) {
		/* Signed. Cannot recvfile. */

	/* The WRITE request body follows the fixed header. */
	body = &state->pktbuf[SMB2_HDR_BODY];

	file_id_persistent	= BVAL(body, 0x10);
	file_id_volatile	= BVAL(body, 0x18);

	status = smb2srv_open_lookup(state->req->xconn,
	if (!NT_STATUS_IS_OK(status)) {

	/* Only plain disk files qualify: no IPC, printers or streams. */
	if (fsp->conn == NULL) {
	if (IS_IPC(fsp->conn)) {
	if (IS_PRINT(fsp->conn)) {
	if (fsp->base_fsp != NULL) {

	DEBUG(10,("Doing recvfile write len = %u\n",
		  (unsigned int)(state->pktfull - state->pktlen)));
4444 static NTSTATUS smbd_smb2_request_next_incoming(struct smbXsrv_connection *xconn)
4446 struct smbd_server_connection *sconn = xconn->client->sconn;
4447 struct smbd_smb2_request_read_state *state = &xconn->smb2.request_read_state;
4448 size_t max_send_queue_len;
4449 size_t cur_send_queue_len;
4451 if (!NT_STATUS_IS_OK(xconn->transport.status)) {
4453 * we're not supposed to do any io
4455 return NT_STATUS_OK;
4458 if (state->req != NULL) {
4460 * if there is already a tstream_readv_pdu
4461 * pending, we are done.
4463 return NT_STATUS_OK;
4466 max_send_queue_len = MAX(1, xconn->smb2.credits.max/16);
4467 cur_send_queue_len = xconn->smb2.send_queue_len;
4469 if (cur_send_queue_len > max_send_queue_len) {
4471 * if we have a lot of requests to send,
4472 * we wait until they are on the wire until we
4473 * ask for the next request.
4475 smb_panic(__location__);
4476 return NT_STATUS_OK;
4479 /* ask for the next request */
4480 ZERO_STRUCTP(state);
4481 state->req = smbd_smb2_request_allocate(xconn);
4482 if (state->req == NULL) {
4483 return NT_STATUS_NO_MEMORY;
4485 state->req->sconn = sconn;
4486 state->req->xconn = xconn;
4487 state->min_recv_size = lp_min_receive_file_size();
4489 TEVENT_FD_READABLE(xconn->transport.fde);
4491 return NT_STATUS_OK;
/*
 * Process the very first SMB2 NEGPROT PDU on a connection (possibly
 * one that was up-translated from an SMB1 negprot).  Initializes the
 * SMB2 layer, re-evaluates pending-break ack checking for the new
 * connection, then validates and dispatches the request like the
 * normal path.  Any failure terminates the connection (or, for
 * pending-break problems, the whole client).
 */
NTSTATUS smbd_smb2_process_negprot(struct smbXsrv_connection *xconn,
				   uint64_t expected_seq_low,
				   const uint8_t *inpdu, size_t size)
{
	struct smbd_server_connection *sconn = xconn->client->sconn;
	struct smbd_smb2_request *req = NULL;

	DEBUG(10,("smbd_smb2_first_negprot: packet length %u\n",
		 (unsigned int)size));

	status = smbd_initialize_smb2(xconn, expected_seq_low);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_connection_terminate(xconn, nt_errstr(status));

	/*
	 * If a new connection joins the process, when we're
	 * already in a "pending break cycle", we need to
	 * turn on the ack checker on the new connection.
	 */
	status = smbXsrv_client_pending_breaks_updated(xconn->client);
	if (!NT_STATUS_IS_OK(status)) {
		/*
		 * If there's a problem, we disconnect the whole
		 * client with all connections here!
		 *
		 * Instead of just the new connection.
		 */
		smbd_server_disconnect_client(xconn->client, nt_errstr(status));

	status = smbd_smb2_request_create(xconn, inpdu, size, &req);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_connection_terminate(xconn, nt_errstr(status));

	status = smbd_smb2_request_validate(req);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_connection_terminate(xconn, nt_errstr(status));

	status = smbd_smb2_request_setup_out(req);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_connection_terminate(xconn, nt_errstr(status));

	/*
	 * this was already counted at the SMB1 layer =>
	 * smbd_smb2_request_dispatch() should not count it twice.
	 */
	if (profile_p->values.request_stats.count > 0) {
		profile_p->values.request_stats.count--;

	status = smbd_smb2_request_dispatch(req);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_connection_terminate(xconn, nt_errstr(status));

	status = smbd_smb2_request_next_incoming(xconn);
	if (!NT_STATUS_IS_OK(status)) {
		smbd_server_connection_terminate(xconn, nt_errstr(status));

	sconn->num_requests++;
	return NT_STATUS_OK;
/*
 * Classify a socket syscall result: translate @sys_errno into an
 * error code for the caller and mark transient conditions (EINTR,
 * EINPROGRESS, EAGAIN/EWOULDBLOCK, and ENOMEM on some platforms)
 * as retryable.
 *
 * NOTE(review): the return statements of the individual branches are
 * elided in this view; the retry/out-parameter contract should be
 * confirmed against the full source.
 */
static int socket_error_from_errno(int ret,
	if (sys_errno == 0) {

	if (sys_errno == EINTR) {

	if (sys_errno == EINPROGRESS) {

	if (sys_errno == EAGAIN) {

	/* ENOMEM is retryable on Solaris/illumos, and possibly other systems. */
	if (sys_errno == ENOMEM) {

#if EWOULDBLOCK != EAGAIN
	if (sys_errno == EWOULDBLOCK) {
/*
 * Account @n bytes as written for the head send-queue entry @*_e.
 *
 * Advances the entry's iovec (and/or remaining vfs_io_size) by @n and
 * tracks the bytes in xconn->ack.unacked_bytes.  Returns
 * NT_STATUS_RETRY while the entry still has data to send.  When the
 * entry is complete it is removed from the send queue; entries with
 * ack tracking move to xconn->ack.queue, others are freed.
 */
static NTSTATUS smbd_smb2_advance_send_queue(struct smbXsrv_connection *xconn,
					     struct smbd_smb2_send_queue **_e,
	struct smbd_smb2_send_queue *e = *_e;

	/* These bytes are on the wire but not yet acked by the peer. */
	xconn->ack.unacked_bytes += n;

	ok = iov_advance(&e->vector, &e->count, n);
		return NT_STATUS_INTERNAL_ERROR;
	} else if (e->vfs_io_size > 0) {
		/* n must never exceed what is left of the VFS payload. */
		if (n > e->vfs_io_size) {
			return NT_STATUS_INTERNAL_ERROR;
		e->vfs_io_size -= n;

		return NT_STATUS_RETRY;
	} else if (e->vfs_io_size > 0) {
		return NT_STATUS_RETRY;

	/* Entry fully sent: take it off the send queue. */
	xconn->smb2.send_queue_len--;
	DLIST_REMOVE(xconn->smb2.send_queue, e);

	if (e->ack.req == NULL) {
		talloc_free(e->mem_ctx);
		return NT_STATUS_OK;

	/* Ack tracking armed: park on the ack queue until acked. */
	e->ack.required_acked_bytes = xconn->ack.unacked_bytes;
	DLIST_ADD_END(xconn->ack.queue, e);

	return NT_STATUS_OK;
/*
 * Drain xconn->smb2.send_queue using non-blocking sendmsg().
 *
 * Entries carrying a sendfile header are instead flattened into a
 * single buffer and handed to the sendfile path.  On partial sends we
 * re-arm TEVENT_FD_WRITEABLE and return; on hard socket errors the
 * transport is disconnected.
 */
static NTSTATUS smbd_smb2_flush_with_sendmsg(struct smbXsrv_connection *xconn)
	if (xconn->smb2.send_queue == NULL) {
		/* Nothing left to send, stop write events. */
		TEVENT_FD_NOT_WRITEABLE(xconn->transport.fde);
		return NT_STATUS_OK;

	while (xconn->smb2.send_queue != NULL) {
		struct smbd_smb2_send_queue *e = xconn->smb2.send_queue;

		if (e->sendfile_header != NULL) {
			status = NT_STATUS_INTERNAL_ERROR;

			/* Total size of all header iovecs for this entry. */
			for (i=0; i < e->count; i++) {
				size += e->vector[i].iov_len;

			if (size <= e->sendfile_header->length) {
				/* Reuse the existing header buffer. */
				buf = e->sendfile_header->data;
				buf = talloc_array(e->mem_ctx, uint8_t, size);
					return NT_STATUS_NO_MEMORY;

			/* Flatten the iovecs into one contiguous buffer. */
			for (i=0; i < e->count; i++) {
				       e->vector[i].iov_base,
				       e->vector[i].iov_len);
				size += e->vector[i].iov_len;

			e->sendfile_header->data = buf;
			e->sendfile_header->length = size;
			e->sendfile_status = &status;

			xconn->smb2.send_queue_len--;
			DLIST_REMOVE(xconn->smb2.send_queue, e);

			size += e->sendfile_body_size;

			/*
			 * This triggers the sendfile path via
			 */
			talloc_free(e->mem_ctx);
			if (!NT_STATUS_IS_OK(status)) {
				smbXsrv_connection_disconnect_transport(xconn,

			xconn->ack.unacked_bytes += size;

		e->msg = (struct msghdr) {
			.msg_iov = e->vector,
			.msg_iovlen = e->count,

		ret = sendmsg(xconn->transport.sock, &e->msg, MSG_DONTWAIT);
			/* propagate end of file */
			return NT_STATUS_INTERNAL_ERROR;
			err = socket_error_from_errno(ret, errno, &retry);
				/* Transient: wait for the fd to become writable. */
				TEVENT_FD_WRITEABLE(xconn->transport.fde);
				return NT_STATUS_OK;
			status = map_nt_error_from_unix_common(err);
			smbXsrv_connection_disconnect_transport(xconn,

		status = smbd_smb2_advance_send_queue(xconn, &e, ret);
		if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
			/* Partial send: resume when writable again. */
			TEVENT_FD_WRITEABLE(xconn->transport.fde);
			return NT_STATUS_OK;
		if (!NT_STATUS_IS_OK(status)) {
			smbXsrv_connection_disconnect_transport(xconn,

	return NT_STATUS_OK;
4771 static void smbd_smb2_flush_completion_io_uring(struct samba_io_uring_qe *qe,
4772 const char *location);
4773 static void smbd_smb2_flush_completion_io_uring_splice(struct samba_io_uring_qe *qe,
4774 const char *location);
/*
 * Drain the send queue by submitting io_uring requests: a sendmsg SQE
 * for iovec data (with MSG_MORE when a VFS payload follows) and a
 * splice SQE for the zero-copy VFS payload.  Entries that already
 * have SQEs in flight (io_uring.num_qes != 0) are skipped; completion
 * is handled by the *_completion_io_uring callbacks.
 *
 * NOTE(review): the per-call DBG_ERR "generation" tracing throughout
 * this function looks like work-in-progress instrumentation; it logs
 * at level 0 on every flush and should be removed or demoted before
 * production use.
 */
static NTSTATUS smbd_smb2_flush_with_io_uring(struct smbXsrv_connection *xconn)
	struct smbd_smb2_send_queue *e = NULL;
	struct smbd_smb2_send_queue *next = NULL;
	static uint64_t _generation;
	uint64_t generation = _generation++;

	DBG_ERR("%s: generation[%llu] START\n", __location__, (unsigned long long)generation);
	for (e = xconn->smb2.send_queue; e != NULL; e = next) {
		struct samba_io_uring_qe *qe = NULL;
		unsigned sendmsg_flags = 0;

		DBG_ERR("%s: generation[%llu] preparing e[%p]\n", __location__, (unsigned long long)generation, e);
		/* The sendfile-header path is not supported with io_uring. */
		SMB_ASSERT(e->sendfile_header == NULL);

		if (e->io_uring.num_qes != 0) {
			/* Already submitted; wait for its completion. */
			DBG_ERR("%s: generation[%llu] SKIP\n", __location__, (unsigned long long)generation);

			size_t l = iov_buflen(e->vector, e->count);
			DBG_ERR("%s: generation[%llu] => SENDMSG %zu e[%p]\n", __location__, (unsigned long long)generation, l, e);
			if (e->vfs_io_size > 0) {
				/* A spliced payload follows: keep the cork on. */
				DBG_ERR("%s: generation[%llu] => MSG_MORE\n", __location__, (unsigned long long)generation);
				sendmsg_flags |= MSG_MORE;

			e->msg = (struct msghdr) {
				.msg_iov = e->vector,
				.msg_iovlen = e->count,

			qe = &e->io_uring.qes[e->io_uring.num_qes++];
			io_uring_prep_sendmsg(&qe->sqe,
					      xconn->transport.sock,

			if (lp_parm_bool(-1, "smb2srv", "send_uring_async", false)) {
				const struct samba_io_uring_features *features =
					samba_io_uring_get_features(xconn->smb2.send_uring);

				if (features->flag_async) {
					DBG_ERR("%s: generation[%llu] => FORCE_ASYNC\n", __location__, (unsigned long long)generation);
					qe->force_async = true;

			// only works with force_async...
			qe->private_data = e;
			qe->completion_fn = smbd_smb2_flush_completion_io_uring;
			DBG_ERR("%s: generation[%llu] SETUP sendmsg\n", __location__, (unsigned long long)generation);
		} else if (e->vfs_io_size > 0) {
			int io_output_fd = smb_vfs_io_output_fd(e->vfs_io);

			DBG_ERR("%s: generation[%llu] => SPLICE %zu\n", __location__, (unsigned long long)generation, e->vfs_io_size);
			qe = &e->io_uring.qes[e->io_uring.num_qes++];
			io_uring_prep_splice(&qe->sqe,
					     xconn->transport.sock, -1,

			if (lp_parm_bool(-1, "smb2srv", "send_uring_async", false)) {
				const struct samba_io_uring_features *features =
					samba_io_uring_get_features(xconn->smb2.send_uring);

				if (features->flag_async) {
					DBG_ERR("%s: generation[%llu] => FORCE_ASYNC\n", __location__, (unsigned long long)generation);
					qe->force_async = true;

			// only works with force_async...
			qe->private_data = e;
			qe->completion_fn = smbd_smb2_flush_completion_io_uring_splice;
			DBG_ERR("%s: generation[%llu] SETUP splice\n", __location__, (unsigned long long)generation);
			/* Neither iovec data nor VFS payload: must not happen. */
			smb_panic(__location__);

		DBG_ERR("%s: generation[%llu] before samba_io_uring_request_submit e[%p]\n", __location__, (unsigned long long)generation, e);
		samba_io_uring_request_submit(xconn->smb2.send_uring,
		DBG_ERR("%s: generation[%llu] after samba_io_uring_request_submit e[%p]\n", __location__, (unsigned long long)generation, e);

	DBG_ERR("%s: generation[%llu] END\n", __location__, (unsigned long long)generation);
	return NT_STATUS_OK;
/*
 * io_uring completion for a sendmsg SQE.  Translates the cqe result
 * (negative errno, 0 == EOF, positive == bytes sent) into an
 * NTSTATUS, advances the send-queue entry and, on RETRY (partial
 * send), resets its io_uring state and resubmits via
 * smbd_smb2_flush_send_queue().  Fatal errors terminate the
 * connection.
 *
 * NOTE(review): the DBG_ERR generation tracing here is WIP
 * instrumentation, matching smbd_smb2_flush_with_io_uring().
 */
static void smbd_smb2_flush_completion_io_uring(struct samba_io_uring_qe *qe,
						const char *location)
	struct smbd_smb2_send_queue *e =
		(struct smbd_smb2_send_queue *)qe->private_data;
	struct smbXsrv_connection *xconn = e->xconn;
	NTSTATUS status = NT_STATUS_OK;
	static uint64_t _generation;
	uint64_t generation = _generation++;

	DBG_ERR("%s: generation[%llu] e[%p]\n", __location__, (unsigned long long)generation, e);

		status = NT_STATUS_OK;
	} else if (ret == 0) {
		/* propagate end of file */
		status = NT_STATUS_INTERNAL_ERROR;
	} else if (ret < 0) {
		/* cqe res carries a negative errno on failure. */
		status = map_nt_error_from_unix_common(-ret);

	DBG_ERR("%s: generation[%llu] ret=%d %s\n", __location__, (unsigned long long)generation, ret, nt_errstr(status));
	if (!NT_STATUS_IS_OK(status)) {
		DBG_ERR("%s: generation[%llu]\n", __location__, (unsigned long long)generation);
		smbd_server_connection_terminate_ex(xconn,

	DBG_ERR("%s: generation[%llu] before smbd_smb2_advance_send_queue e[%p]\n", __location__, (unsigned long long)generation, e);
	status = smbd_smb2_advance_send_queue(xconn, &e, ret);
	DBG_ERR("%s: generation[%llu] after smbd_smb2_advance_send_queue e[%p]\n", __location__, (unsigned long long)generation, e);
	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/* Partial send: clear SQE bookkeeping and resubmit. */
		ZERO_STRUCT(e->io_uring);
		DBG_ERR("%s: generation[%llu] RETRY before smbd_smb2_flush_send_queue e[%p]\n", __location__, (unsigned long long)generation, e);
		smbd_smb2_flush_send_queue(xconn);
		DBG_ERR("%s: generation[%llu] RETRY after smbd_smb2_flush_send_queue e[%p]\n", __location__, (unsigned long long)generation, e);

	if (!NT_STATUS_IS_OK(status)) {
		DBG_ERR("%s: generation[%llu]\n", __location__, (unsigned long long)generation);
		smbd_server_connection_terminate_ex(xconn,

	DBG_ERR("%s: generation[%llu] => COMPLETED\n", __location__, (unsigned long long)generation);
	/* advance_send_queue consumed the entry on full completion. */
	SMB_ASSERT(e == NULL);
	DBG_ERR("%s: generation[%llu] => BEFORE FLUSH\n", __location__, (unsigned long long)generation);
	smbd_smb2_flush_send_queue(xconn);
	DBG_ERR("%s: generation[%llu] => AFTER FLUSH\n", __location__, (unsigned long long)generation);
/*
 * io_uring completion for a splice SQE.  Verifies the splice moved
 * the whole VFS payload (a short splice is currently unhandled, hence
 * the panic) and then delegates the common bookkeeping to
 * smbd_smb2_flush_completion_io_uring().
 */
static void smbd_smb2_flush_completion_io_uring_splice(struct samba_io_uring_qe *qe,
						       const char *location)
	struct smbd_smb2_send_queue *e =
		(struct smbd_smb2_send_queue *)qe->private_data;
	static uint64_t _generation;
	uint64_t generation = _generation++;

	DBG_ERR("%s: generation[%llu] e[%p]\n", __location__, (unsigned long long)generation, e);

	/* NOTE(review): short splice aborts the process — intentional? */
	if (ret > 0 && (size_t)ret != e->vfs_io_size) {
		smb_panic("smbd_smb2_advance_send_queue ret != e->vfs_io_size");

	smbd_smb2_flush_completion_io_uring(qe, location);
	DBG_ERR("%s: generation[%llu]\n", __location__, (unsigned long long)generation);
/*
 * Flush the connection's send queue using io_uring when available,
 * otherwise plain sendmsg().  Afterwards re-arm reception in case
 * reads were blocked on send-queue backpressure.
 */
static NTSTATUS smbd_smb2_flush_send_queue(struct smbXsrv_connection *xconn)
	if (xconn->smb2.send_uring != NULL) {
		status = smbd_smb2_flush_with_io_uring(xconn);
		if (!NT_STATUS_IS_OK(status)) {

		status = smbd_smb2_flush_with_sendmsg(xconn);
		if (!NT_STATUS_IS_OK(status)) {

	/*
	 * Restart reads if we were blocked on
	 * draining the send queue.
	 */
	status = smbd_smb2_request_next_incoming(xconn);
	if (!NT_STATUS_IS_OK(status)) {

	return NT_STATUS_OK;
/*
 * Central fd event handler for an SMB2 connection.
 *
 * On TEVENT_FD_WRITE: flush the send queue.  On TEVENT_FD_READ: pull
 * bytes non-blockingly with recvmsg(), first the 4-byte NBT length
 * prefix, then the PDU itself (possibly in two steps for the
 * receivefile write fast path), and finally parse, validate and
 * dispatch the compound request.
 */
static NTSTATUS smbd_smb2_io_handler(struct smbXsrv_connection *xconn,
	struct smbd_server_connection *sconn = xconn->client->sconn;
	struct smbd_smb2_request_read_state *state = &xconn->smb2.request_read_state;
	struct smbd_smb2_request *req = NULL;
	size_t min_recvfile_size = UINT32_MAX;

	if (!NT_STATUS_IS_OK(xconn->transport.status)) {
		/*
		 * we're not supposed to do any io
		 */
		TEVENT_FD_NOT_READABLE(xconn->transport.fde);
		TEVENT_FD_NOT_WRITEABLE(xconn->transport.fde);
		return NT_STATUS_OK;

	if (fde_flags & TEVENT_FD_WRITE) {
		status = smbd_smb2_flush_send_queue(xconn);
		if (!NT_STATUS_IS_OK(status)) {

	if (!(fde_flags & TEVENT_FD_READ)) {
		return NT_STATUS_OK;

	if (state->req == NULL) {
		/* No request slot armed: stop read events until one is. */
		TEVENT_FD_NOT_READABLE(xconn->transport.fde);
		return NT_STATUS_OK;

	/*
	 * NOTE(review): the following SO_INCOMING_CPU probe (including a
	 * local re-declaration of getsockopt) is clearly debugging
	 * instrumentation and should be removed before production use.
	 */
	int getsockopt(int sockfd, int level, int optname,
		       void *optval, socklen_t *optlen);
	int len = sizeof(rx_cpu);
	int my_cpu = sched_getcpu();
	ret = getsockopt(xconn->transport.sock, SOL_SOCKET, SO_INCOMING_CPU, &rx_cpu, &len);
	DBG_ERR("%s: channel_id=%llu ret=%d rx_cpu=%d my_cpu=%d\n", __location__,
		(unsigned long long )xconn->channel_id, ret, rx_cpu, my_cpu);

	if (!state->hdr.done) {
		/* Phase 1: read the 4-byte NBT length prefix first. */
		state->hdr.done = true;

		state->vector.iov_base = (void *)state->hdr.nbt;
		state->vector.iov_len = NBT_HDR_SIZE;

	msg = (struct msghdr) {
		.msg_iov = &state->vector,

	ret = recvmsg(xconn->transport.sock, &msg, MSG_DONTWAIT);
		/* propagate end of file */
		status = NT_STATUS_END_OF_FILE;
		smbXsrv_connection_disconnect_transport(xconn,
		err = socket_error_from_errno(ret, errno, &retry);
			/* Transient: wait for more data. */
			TEVENT_FD_READABLE(xconn->transport.fde);
			return NT_STATUS_OK;
		status = map_nt_error_from_unix_common(err);
		smbXsrv_connection_disconnect_transport(xconn,

	if (ret < state->vector.iov_len) {
		/* Short read: advance the iovec and wait for the rest. */
		base = (uint8_t *)state->vector.iov_base;
		state->vector.iov_base = (void *)base;
		state->vector.iov_len -= ret;
		/* we have more to read */
		TEVENT_FD_READABLE(xconn->transport.fde);
		return NT_STATUS_OK;

	if (state->pktlen > 0) {
		if (state->doing_receivefile && !is_smb2_recvfile_write(state)) {
			/*
			 * Not a possible receivefile write.
			 * Read the rest of the data.
			 */
			state->doing_receivefile = false;

			state->pktbuf = talloc_realloc(state->req,
			if (state->pktbuf == NULL) {
				return NT_STATUS_NO_MEMORY;

			state->vector.iov_base = (void *)(state->pktbuf +
			state->vector.iov_len = (state->pktfull -

			state->pktlen = state->pktfull;

		/*
		 * Either this is a receivefile write so we've
		 * done a short read, or if not we have all the data.
		 */

	/*
	 * Now we analyze the NBT header
	 */
	if (state->hdr.nbt[0] != 0x00) {
		state->min_recv_size = 0;
	state->pktfull = smb2_len(state->hdr.nbt);
	if (state->pktfull == 0) {

	if (state->min_recv_size != 0) {
		min_recvfile_size = SMBD_SMB2_SHORT_RECEIVEFILE_WRITE_LEN;
		min_recvfile_size += state->min_recv_size;

	if (state->pktfull > min_recvfile_size) {
		/*
		 * Might be a receivefile write. Read the SMB2 HEADER +
		 * SMB2_WRITE header first. Set 'doing_receivefile'
		 * as we're *attempting* receivefile write. If this
		 * turns out not to be a SMB2_WRITE request or otherwise
		 * not suitable then we'll just read the rest of the data
		 * the next time this function is called.
		 */
		state->pktlen = SMBD_SMB2_SHORT_RECEIVEFILE_WRITE_LEN;
		state->doing_receivefile = true;
		state->pktlen = state->pktfull;

	state->pktbuf = talloc_array(state->req, uint8_t, state->pktlen);
	if (state->pktbuf == NULL) {
		return NT_STATUS_NO_MEMORY;

	state->vector.iov_base = (void *)state->pktbuf;
	state->vector.iov_len = state->pktlen;

	if (state->hdr.nbt[0] != 0x00) {
		/* Non session-message NBT types are ignored entirely. */
		DEBUG(1,("ignore NBT[0x%02X] msg\n",
			 state->hdr.nbt[0]));
	ZERO_STRUCTP(state);
	state->min_recv_size = lp_min_receive_file_size();

	req->request_time = timeval_current();
	now = timeval_to_nttime(&req->request_time);

	status = smbd_smb2_inbuf_parse_compound(xconn,
						&req->in.vector_count);
	if (!NT_STATUS_IS_OK(status)) {

	if (state->doing_receivefile) {
		/* Fake an SMB1 request to carry the unread byte count. */
		req->smb1req = talloc_zero(req, struct smb_request);
		if (req->smb1req == NULL) {
			return NT_STATUS_NO_MEMORY;
		req->smb1req->unread_bytes = state->pktfull - state->pktlen;

	ZERO_STRUCTP(state);

	/* Vector 0 is the NBT header; the first request starts at 1. */
	req->current_idx = 1;

	DEBUG(10,("smbd_smb2_request idx[%d] of %d vectors\n",
		 req->current_idx, req->in.vector_count));

	status = smbd_smb2_request_validate(req);
	if (!NT_STATUS_IS_OK(status)) {

	status = smbd_smb2_request_setup_out(req);
	if (!NT_STATUS_IS_OK(status)) {

	status = smbd_smb2_request_dispatch(req);
	if (!NT_STATUS_IS_OK(status)) {

	sconn->num_requests++;

	/* The timeout_processing function isn't run nearly
	   often enough to implement 'max log size' without
	   overrunning the size of the file by many megabytes.
	   This is especially true if we are running at debug
	   level 10.  Checking every 50 SMB2s is a nice
	   tradeoff of performance vs log file size overrun. */

	if ((sconn->num_requests % 50) == 0 &&
	    need_to_check_log_size()) {
		change_to_root_user();

	status = smbd_smb2_request_next_incoming(xconn);
	if (!NT_STATUS_IS_OK(status)) {

	return NT_STATUS_OK;
5230 static void smbd_smb2_connection_handler(struct tevent_context *ev,
5231 struct tevent_fd *fde,
5235 struct smbXsrv_connection *xconn =
5236 talloc_get_type_abort(private_data,
5237 struct smbXsrv_connection);
5240 status = smbd_smb2_io_handler(xconn, flags);
5241 if (!NT_STATUS_IS_OK(status)) {
5242 smbd_server_connection_terminate(xconn, nt_errstr(status));