+/*
+ * Deep-copy one SMB2 output iovec triple (header, body, dynamic part)
+ * from srcvec[0..2] into outvec[0..2], allocating the duplicates as
+ * children of ctx. Where srcvec[1]/srcvec[2] point into the srcvec[0]
+ * buffer (the common "standard" layouts), the copies point into the
+ * duplicated outvec[0] buffer instead of getting their own allocation.
+ * Returns false on allocation failure; caller is expected to free ctx,
+ * which releases any partial allocations made here.
+ */
+static bool dup_smb2_vec3(TALLOC_CTX *ctx,
+ struct iovec *outvec,
+ const struct iovec *srcvec)
+{
+ /* vec[0] is always boilerplate and must
+ * be allocated with size OUTVEC_ALLOC_SIZE. */
+
+ outvec[0].iov_base = talloc_memdup(ctx,
+ srcvec[0].iov_base,
+ OUTVEC_ALLOC_SIZE);
+ if (!outvec[0].iov_base) {
+ return false;
+ }
+ /* Only the SMB2 header portion is visible in vec[0]; the rest of
+ * the OUTVEC_ALLOC_SIZE buffer may back vec[1] and vec[2] below. */
+ outvec[0].iov_len = SMB2_HDR_BODY;
+
+ /*
+ * If this is a "standard" vec[1] of length 8,
+ * pointing to srcvec[0].iov_base + SMB2_HDR_BODY,
+ * then duplicate this. Else use talloc_memdup().
+ */
+
+ if (srcvec[1].iov_len == 8 &&
+ srcvec[1].iov_base ==
+ ((uint8_t *)srcvec[0].iov_base) +
+ SMB2_HDR_BODY) {
+ /* Keep the same aliasing in the copy: point into outvec[0]. */
+ outvec[1].iov_base = ((uint8_t *)outvec[0].iov_base) +
+ SMB2_HDR_BODY;
+ outvec[1].iov_len = 8;
+ } else {
+ outvec[1].iov_base = talloc_memdup(ctx,
+ srcvec[1].iov_base,
+ srcvec[1].iov_len);
+ if (!outvec[1].iov_base) {
+ return false;
+ }
+ outvec[1].iov_len = srcvec[1].iov_len;
+ }
+
+ /*
+ * If this is a "standard" vec[2] of length 1,
+ * pointing to srcvec[0].iov_base + (OUTVEC_ALLOC_SIZE - 1)
+ * then duplicate this. Else use talloc_memdup().
+ */
+
+ if (srcvec[2].iov_base &&
+ srcvec[2].iov_len) {
+ if (srcvec[2].iov_base ==
+ ((uint8_t *)srcvec[0].iov_base) +
+ (OUTVEC_ALLOC_SIZE - 1) &&
+ srcvec[2].iov_len == 1) {
+ /* Common SMB2 error packet case. */
+ outvec[2].iov_base = ((uint8_t *)outvec[0].iov_base) +
+ (OUTVEC_ALLOC_SIZE - 1);
+ } else {
+ outvec[2].iov_base = talloc_memdup(ctx,
+ srcvec[2].iov_base,
+ srcvec[2].iov_len);
+ if (!outvec[2].iov_base) {
+ return false;
+ }
+ }
+ outvec[2].iov_len = srcvec[2].iov_len;
+ } else {
+ /* No dynamic part in the source; mirror the empty vec. */
+ outvec[2].iov_base = NULL;
+ outvec[2].iov_len = 0;
+ }
+ return true;
+}
+
+/*
+ * Duplicate an smbd_smb2_request's outgoing state for use as an
+ * interim response. Copies the relevant scalar fields and deep-copies
+ * the out.vector array (NBT header plus one iovec triple per reply).
+ * Returns NULL on allocation failure; partial allocations are released
+ * by freeing newreq, since all copies are talloc children of it.
+ */
+static struct smbd_smb2_request *dup_smb2_req(const struct smbd_smb2_request *req)
+{
+ struct smbd_smb2_request *newreq = NULL;
+ struct iovec *outvec = NULL;
+ int count = req->out.vector_count;
+ int i;
+
+ newreq = smbd_smb2_request_allocate(req->sconn);
+ if (!newreq) {
+ return NULL;
+ }
+
+ newreq->sconn = req->sconn;
+ newreq->session = req->session;
+ newreq->do_signing = req->do_signing;
+ newreq->current_idx = req->current_idx;
+ newreq->async = false;
+ newreq->cancelled = false;
+ /* Note we are leaving:
+ ->tcon
+ ->smb1req
+ ->compat_chain_fsp
+ uninitialized as NULL here as
+ they're not used in the interim
+ response code. JRA. */
+
+ outvec = talloc_zero_array(newreq, struct iovec, count);
+ if (!outvec) {
+ TALLOC_FREE(newreq);
+ return NULL;
+ }
+ newreq->out.vector = outvec;
+ newreq->out.vector_count = count;
+
+ /* Setup the outvec's identically to req. */
+ outvec[0].iov_base = newreq->out.nbt_hdr;
+ outvec[0].iov_len = 4;
+ memcpy(newreq->out.nbt_hdr, req->out.nbt_hdr, 4);
+
+ /* Setup the vectors identically to the ones in req.
+ * Replies start at index 1 and come in triples of
+ * (header, body, dynamic) - hence the stride of 3. */
+ for (i = 1; i < count; i += 3) {
+ if (!dup_smb2_vec3(outvec, &outvec[i], &req->out.vector[i])) {
+ break;
+ }
+ }
+
+ if (i < count) {
+ /* Alloc failed. Freeing newreq releases the
+ * partially-duplicated vectors too (talloc children). */
+ TALLOC_FREE(newreq);
+ return NULL;
+ }
+
+ /* Recompute the NBT length field over the copied vectors. */
+ smb2_setup_nbt_length(newreq->out.vector,
+ newreq->out.vector_count);
+
+ return newreq;
+}
+
+static void smbd_smb2_request_writev_done(struct tevent_req *subreq);
+
+/*
+ * Send an interim response for the already-completed parts of a
+ * compound request chain that is now going async. Duplicates req,
+ * drops the final reply triple (reserved for the later async reply),
+ * terminates the chain in the previous reply header, re-signs if
+ * needed and queues the write. The duplicate is freed by the writev
+ * completion path on success, and freed here on every error path.
+ */
+static NTSTATUS smb2_send_async_interim_response(const struct smbd_smb2_request *req)
+{
+ int i = 0;
+ uint8_t *outhdr = NULL;
+ struct smbd_smb2_request *nreq = NULL;
+
+ /* Create a new smb2 request we'll use
+ for the interim return. */
+ nreq = dup_smb2_req(req);
+ if (!nreq) {
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ /* Lose the last 3 out vectors. They're the
+ ones we'll be using for the async reply. */
+ nreq->out.vector_count -= 3;
+
+ smb2_setup_nbt_length(nreq->out.vector,
+ nreq->out.vector_count);
+
+ /* Step back to the previous reply. */
+ i = nreq->current_idx - 3;
+ outhdr = (uint8_t *)nreq->out.vector[i].iov_base;
+ /* And end the chain. */
+ SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, 0);
+
+ /* Calculate outgoing credits */
+ smb2_calculate_credits(req, nreq);
+
+ /* Re-sign if needed. */
+ if (nreq->do_signing) {
+ NTSTATUS status;
+ status = smb2_signing_sign_pdu(nreq->session->session_key,
+ &nreq->out.vector[i], 3);
+ if (!NT_STATUS_IS_OK(status)) {
+ /* Don't leak the interim request on error;
+ * nothing references it yet. */
+ TALLOC_FREE(nreq);
+ return status;
+ }
+ }
+ if (DEBUGLEVEL >= 10) {
+ dbgtext("smb2_send_async_interim_response: nreq->current_idx = %u\n",
+ (unsigned int)nreq->current_idx );
+ dbgtext("smb2_send_async_interim_response: returning %u vectors\n",
+ (unsigned int)nreq->out.vector_count );
+ print_req_vectors(nreq);
+ }
+ nreq->subreq = tstream_writev_queue_send(nreq,
+ nreq->sconn->smb2.event_ctx,
+ nreq->sconn->smb2.stream,
+ nreq->sconn->smb2.send_queue,
+ nreq->out.vector,
+ nreq->out.vector_count);
+
+ if (nreq->subreq == NULL) {
+ /* Nothing queued - free the duplicate before bailing. */
+ TALLOC_FREE(nreq);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ tevent_req_set_callback(nreq->subreq,
+ smbd_smb2_request_writev_done,
+ nreq);
+
+ return NT_STATUS_OK;
+}
+
+/*
+ * State for an interim STATUS_PENDING reply. Holds the fixed-size
+ * packet (4-byte NBT length + SMB2 header + 8-byte error body + 1
+ * pad byte) and the iovec triple used to queue it for sending.
+ */
+struct smbd_smb2_request_pending_state {
+ /* Connection to terminate if the queued write fails. */
+ struct smbd_server_connection *sconn;
+ uint8_t buf[4 + SMB2_HDR_BODY + 0x08 + 1];
+ struct iovec vector[3];
+};
+
+/*
+ * Completion callback for the queued interim STATUS_PENDING write.
+ * Releases the pending state on success; on a transport error the
+ * whole connection is torn down instead.
+ */
+static void smbd_smb2_request_pending_writev_done(struct tevent_req *subreq)
+{
+ struct smbd_smb2_request_pending_state *state =
+ tevent_req_callback_data(subreq,
+ struct smbd_smb2_request_pending_state);
+ struct smbd_server_connection *sconn = state->sconn;
+ int saved_errno;
+ int rc;
+
+ rc = tstream_writev_queue_recv(subreq, &saved_errno);
+ TALLOC_FREE(subreq);
+
+ if (rc != -1) {
+ /* Interim packet is on the wire; state is done with. */
+ TALLOC_FREE(state);
+ return;
+ }
+
+ /* Fatal write error - terminate the connection. */
+ smbd_server_connection_terminate(sconn,
+ nt_errstr(map_nt_error_from_unix(saved_errno)));
+}
+
+/*
+ * Decide whether req must go async, and if so queue an interim
+ * STATUS_PENDING reply, then rearrange req's in/out vectors so that
+ * only the outstanding async request remains. Returns NT_STATUS_OK
+ * either when the request already completed synchronously or when
+ * the interim reply was successfully queued.
+ */
+NTSTATUS smbd_smb2_request_pending_queue(struct smbd_smb2_request *req,
+ struct tevent_req *subreq)
+{
+ NTSTATUS status;
+ struct smbd_smb2_request_pending_state *state = NULL;
+ int i = req->current_idx;
+ uint8_t *reqhdr = NULL;
+ uint8_t *hdr = NULL;
+ uint8_t *body = NULL;
+ uint32_t flags = 0;
+ uint64_t message_id = 0;
+ uint64_t async_id = 0;
+ struct iovec *outvec = NULL;
+
+ if (!tevent_req_is_in_progress(subreq)) {
+ /* Already completed synchronously - nothing to do. */
+ return NT_STATUS_OK;
+ }
+
+ req->subreq = subreq;
+ subreq = NULL;
+
+ if (req->async) {
+ /* We're already async. */
+ return NT_STATUS_OK;
+ }
+
+ if (req->in.vector_count > i + 3) {
+ /*
+ * We're trying to go async in a compound
+ * request chain. This is not allowed.
+ * Cancel the outstanding request.
+ */
+ tevent_req_cancel(req->subreq);
+ return smbd_smb2_request_error(req,
+ NT_STATUS_INSUFFICIENT_RESOURCES);
+ }
+
+ if (DEBUGLEVEL >= 10) {
+ dbgtext("smbd_smb2_request_pending_queue: req->current_idx = %u\n",
+ (unsigned int)req->current_idx );
+ print_req_vectors(req);
+ }
+
+ if (req->out.vector_count > 4) {
+ /* This is a compound reply. We
+ * must do an interim response
+ * followed by the async response
+ * to match W2K8R2.
+ */
+ status = smb2_send_async_interim_response(req);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
+ }
+
+ /* Don't return an intermediate packet on a pipe read/write. */
+ if (req->tcon && req->tcon->compat_conn && IS_IPC(req->tcon->compat_conn)) {
+ return NT_STATUS_OK;
+ }
+
+ reqhdr = (uint8_t *)req->out.vector[i].iov_base;
+ flags = (IVAL(reqhdr, SMB2_HDR_FLAGS) & ~SMB2_HDR_FLAG_CHAINED);
+ message_id = BVAL(reqhdr, SMB2_HDR_MESSAGE_ID);
+ async_id = message_id; /* keep it simple for now... */
+
+ /*
+ * What we send is identical to a smbd_smb2_request_error
+ * packet with an error status of STATUS_PENDING. Make use
+ * of this fact sometime when refactoring. JRA.
+ */
+
+ state = talloc_zero(req->sconn, struct smbd_smb2_request_pending_state);
+ if (state == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ state->sconn = req->sconn;
+
+ state->vector[0].iov_base = (void *)state->buf;
+ state->vector[0].iov_len = 4;
+
+ state->vector[1].iov_base = state->buf + 4;
+ state->vector[1].iov_len = SMB2_HDR_BODY;
+
+ state->vector[2].iov_base = state->buf + 4 + SMB2_HDR_BODY;
+ state->vector[2].iov_len = 9;
+
+ smb2_setup_nbt_length(state->vector, 3);
+
+ hdr = (uint8_t *)state->vector[1].iov_base;
+ body = (uint8_t *)state->vector[2].iov_base;
+
+ SIVAL(hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
+ SSVAL(hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
+ SSVAL(hdr, SMB2_HDR_EPOCH, 0);
+ SIVAL(hdr, SMB2_HDR_STATUS, NT_STATUS_V(STATUS_PENDING));
+ SSVAL(hdr, SMB2_HDR_OPCODE, SVAL(reqhdr, SMB2_HDR_OPCODE));
+
+ SIVAL(hdr, SMB2_HDR_FLAGS, flags | SMB2_HDR_FLAG_ASYNC);
+ SIVAL(hdr, SMB2_HDR_NEXT_COMMAND, 0);
+ SBVAL(hdr, SMB2_HDR_MESSAGE_ID, message_id);
+ SBVAL(hdr, SMB2_HDR_PID, async_id);
+ SBVAL(hdr, SMB2_HDR_SESSION_ID,
+ BVAL(reqhdr, SMB2_HDR_SESSION_ID));
+ memset(hdr+SMB2_HDR_SIGNATURE, 0, 16);
+
+ SSVAL(body, 0x00, 0x08 + 1);
+
+ SCVAL(body, 0x02, 0);
+ SCVAL(body, 0x03, 0);
+ SIVAL(body, 0x04, 0);
+ /* Match W2K8R2... */
+ SCVAL(body, 0x08, 0x21);
+
+ /* Ensure we correctly go through crediting. Grant
+ the credits now, and zero credits on the final
+ response. */
+ smb2_set_operation_credit(req->sconn,
+ &req->in.vector[i],
+ &state->vector[1]);
+
+ if (req->do_signing) {
+ status = smb2_signing_sign_pdu(req->session->session_key,
+ &state->vector[1], 2);
+ if (!NT_STATUS_IS_OK(status)) {
+ /* state is talloced off the long-lived sconn -
+ * free it here or it leaks until disconnect. */
+ TALLOC_FREE(state);
+ return status;
+ }
+ }
+
+ subreq = tstream_writev_queue_send(state,
+ req->sconn->smb2.event_ctx,
+ req->sconn->smb2.stream,
+ req->sconn->smb2.send_queue,
+ state->vector,
+ 3);
+
+ if (subreq == NULL) {
+ /* Nothing queued - don't leak state (see above). */
+ TALLOC_FREE(state);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ tevent_req_set_callback(subreq,
+ smbd_smb2_request_pending_writev_done,
+ state);
+
+ /* Note we're going async with this request. */
+ req->async = true;
+
+ /*
+ * Now manipulate req so that the outstanding async request
+ * is the only one left in the struct smbd_smb2_request.
+ */
+
+ if (req->current_idx == 1) {
+ /* There was only one. */
+ goto out;
+ }
+
+ /* Re-arrange the in.vectors. */
+ req->in.vector[1] = req->in.vector[i];
+ req->in.vector[2] = req->in.vector[i+1];
+ req->in.vector[3] = req->in.vector[i+2];
+ req->in.vector_count = 4;
+ /* Reset the new in size. */
+ smb2_setup_nbt_length(req->in.vector, 4);
+
+ /* Now recreate the out.vectors. */
+ outvec = talloc_zero_array(req, struct iovec, 4);
+ if (!outvec) {
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ /* 0 is always boilerplate and must
+ * be of size 4 for the length field. */
+
+ outvec[0].iov_base = req->out.nbt_hdr;
+ outvec[0].iov_len = 4;
+ SIVAL(req->out.nbt_hdr, 0, 0);
+
+ if (!dup_smb2_vec3(outvec, &outvec[1], &req->out.vector[i])) {
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ TALLOC_FREE(req->out.vector);
+
+ req->out.vector = outvec;
+
+ req->current_idx = 1;
+ req->out.vector_count = 4;
+
+ out:
+
+ smb2_setup_nbt_length(req->out.vector,
+ req->out.vector_count);
+
+ /* Ensure our final reply matches the interim one. */
+ reqhdr = (uint8_t *)req->out.vector[1].iov_base;
+ SIVAL(reqhdr, SMB2_HDR_FLAGS, flags | SMB2_HDR_FLAG_ASYNC);
+ SBVAL(reqhdr, SMB2_HDR_PID, async_id);
+
+ {
+ const uint8_t *inhdr =
+ (const uint8_t *)req->in.vector[1].iov_base;
+ DEBUG(10,("smbd_smb2_request_pending_queue: opcode[%s] mid %llu "
+ "going async\n",
+ smb2_opcode_name((uint16_t)IVAL(inhdr, SMB2_HDR_OPCODE)),
+ (unsigned long long)async_id ));
+ }
+ return NT_STATUS_OK;
+}
+
+/*
+ * Process an SMB2 CANCEL request: free the cancel request itself
+ * (cancels never get a response), then search the outstanding request
+ * list for a match - by async id if the ASYNC flag is set, otherwise
+ * by message id - and attempt to cancel its pending subreq.
+ */
+static NTSTATUS smbd_smb2_request_process_cancel(struct smbd_smb2_request *req)
+{
+ struct smbd_server_connection *sconn = req->sconn;
+ struct smbd_smb2_request *cur;
+ const uint8_t *inhdr;
+ int i = req->current_idx;
+ uint32_t flags;
+ uint64_t search_message_id;
+ uint64_t search_async_id;
+ /* Initialized defensively: only meaningful when the search
+ * below breaks out with a match (cur != NULL). */
+ uint64_t found_id = 0;
+
+ inhdr = (const uint8_t *)req->in.vector[i].iov_base;
+
+ flags = IVAL(inhdr, SMB2_HDR_FLAGS);
+ search_message_id = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
+ search_async_id = BVAL(inhdr, SMB2_HDR_PID);
+
+ /*
+ * we don't need the request anymore
+ * cancel requests never have a response
+ */
+ DLIST_REMOVE(req->sconn->smb2.requests, req);
+ TALLOC_FREE(req);
+
+ for (cur = sconn->smb2.requests; cur; cur = cur->next) {
+ const uint8_t *outhdr;
+ uint64_t message_id;
+ uint64_t async_id;
+
+ i = cur->current_idx;
+
+ outhdr = (const uint8_t *)cur->out.vector[i].iov_base;
+
+ message_id = BVAL(outhdr, SMB2_HDR_MESSAGE_ID);
+ async_id = BVAL(outhdr, SMB2_HDR_PID);
+
+ if (flags & SMB2_HDR_FLAG_ASYNC) {
+ if (search_async_id == async_id) {
+ found_id = async_id;
+ break;
+ }
+ } else {
+ if (search_message_id == message_id) {
+ found_id = message_id;
+ break;
+ }
+ }
+ }
+
+ if (cur && cur->subreq) {
+ inhdr = (const uint8_t *)cur->in.vector[i].iov_base;
+ DEBUG(10,("smbd_smb2_request_process_cancel: attempting to "
+ "cancel opcode[%s] mid %llu\n",
+ smb2_opcode_name((uint16_t)IVAL(inhdr, SMB2_HDR_OPCODE)),
+ (unsigned long long)found_id ));
+ tevent_req_cancel(cur->subreq);
+ }
+
+ /* Per MS-SMB2, a cancel always "succeeds" from the sender's
+ * point of view, even if no matching request was found. */
+ return NT_STATUS_OK;
+}
+
+/*************************************************************
+ Ensure an incoming tid is a valid one for us to access.
+ Change to the associated uid credentials and chdir to the
+ valid tid directory.
+*************************************************************/
+
+static NTSTATUS smbd_smb2_request_check_tcon(struct smbd_smb2_request *req)
+{
+ const uint8_t *inhdr;
+ const uint8_t *outhdr;
+ int i = req->current_idx;
+ uint32_t in_tid;
+ void *p;
+ struct smbd_smb2_tcon *tcon;
+ bool chained_fixup = false;
+
+ inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
+
+ in_tid = IVAL(inhdr, SMB2_HDR_TID);
+
+ /* 0xFFFFFFFF means "use the tid from context" - resolve it
+ * from our own (async) or the previous (chained) reply header. */
+ if (in_tid == (0xFFFFFFFF)) {
+ if (req->async) {
+ /*
+ * async request - fill in tid from
+ * already setup out.vector[].iov_base.
+ */
+ outhdr = (const uint8_t *)req->out.vector[i].iov_base;
+ in_tid = IVAL(outhdr, SMB2_HDR_TID);
+ } else if (i > 2) {
+ /*
+ * Chained request - fill in tid from
+ * the previous request out.vector[].iov_base.
+ */
+ outhdr = (const uint8_t *)req->out.vector[i-3].iov_base;
+ in_tid = IVAL(outhdr, SMB2_HDR_TID);
+ chained_fixup = true;
+ }
+ }
+
+ /* lookup an existing tcon on this session */
+ p = idr_find(req->session->tcons.idtree, in_tid);
+ if (p == NULL) {
+ return NT_STATUS_NETWORK_NAME_DELETED;
+ }
+ tcon = talloc_get_type_abort(p, struct smbd_smb2_tcon);
+
+ /* Switch to the uid credentials associated with this tcon. */
+ if (!change_to_user(tcon->compat_conn,req->session->vuid)) {
+ return NT_STATUS_ACCESS_DENIED;
+ }
+
+ /* should we pass FLAG_CASELESS_PATHNAMES here? */
+ if (!set_current_service(tcon->compat_conn, 0, true)) {
+ return NT_STATUS_ACCESS_DENIED;
+ }
+
+ req->tcon = tcon;
+
+ if (chained_fixup) {
+ /* Fix up our own outhdr. */
+ outhdr = (const uint8_t *)req->out.vector[i].iov_base;
+ SIVAL(discard_const_p(uint8_t, outhdr), SMB2_HDR_TID, in_tid);
+ }
+
+ return NT_STATUS_OK;
+}
+
+/*************************************************************
+ Ensure an incoming session_id is a valid one for us to access.
+*************************************************************/
+
+static NTSTATUS smbd_smb2_request_check_session(struct smbd_smb2_request *req)
+{
+ const uint8_t *inhdr;
+ const uint8_t *outhdr;
+ int i = req->current_idx;
+ uint64_t in_session_id;
+ void *p;
+ struct smbd_smb2_session *session;
+ bool chained_fixup = false;
+
+ inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
+
+ in_session_id = BVAL(inhdr, SMB2_HDR_SESSION_ID);
+
+ /* All-ones means "use the session id from context" - resolve it
+ * from our own (async) or the previous (chained) reply header. */
+ if (in_session_id == (0xFFFFFFFFFFFFFFFFLL)) {
+ if (req->async) {
+ /*
+ * async request - fill in session_id from
+ * already setup request out.vector[].iov_base.
+ */
+ outhdr = (const uint8_t *)req->out.vector[i].iov_base;
+ in_session_id = BVAL(outhdr, SMB2_HDR_SESSION_ID);
+ } else if (i > 2) {
+ /*
+ * Chained request - fill in session_id from
+ * the previous request out.vector[].iov_base.
+ */
+ outhdr = (const uint8_t *)req->out.vector[i-3].iov_base;
+ in_session_id = BVAL(outhdr, SMB2_HDR_SESSION_ID);
+ chained_fixup = true;
+ }
+ }
+
+ /* lookup an existing session */
+ p = idr_find(req->sconn->smb2.sessions.idtree, in_session_id);
+ if (p == NULL) {
+ return NT_STATUS_USER_SESSION_DELETED;
+ }
+ session = talloc_get_type_abort(p, struct smbd_smb2_session);
+
+ /* Reject sessions that never completed setup. */
+ if (!NT_STATUS_IS_OK(session->status)) {
+ return NT_STATUS_ACCESS_DENIED;
+ }
+
+ /* Make this session's identity the current user context. */
+ set_current_user_info(session->session_info->unix_info->sanitized_username,
+ session->session_info->unix_info->unix_name,
+ session->session_info->info->domain_name);
+
+ req->session = session;
+
+ if (chained_fixup) {
+ /* Fix up our own outhdr. */
+ outhdr = (const uint8_t *)req->out.vector[i].iov_base;
+ SBVAL(discard_const_p(uint8_t, outhdr), SMB2_HDR_SESSION_ID, in_session_id);
+ }
+ return NT_STATUS_OK;
+}
+
+NTSTATUS smbd_smb2_request_dispatch(struct smbd_smb2_request *req)