2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "smbd/globals.h"
23 #include "../libcli/smb/smb_common.h"
/*
 * One SMB2 LOCK_ELEMENT as parsed from the wire: byte-range offset,
 * length and the SMB2_LOCK_FLAG_* flags. (Members not visible in this
 * chunk of the file.)
 */
25 struct smbd_smb2_lock_element {

/*
 * Start the (possibly asynchronous) lock/unlock operation for one SMB2
 * LOCK request. Returns a tevent_req that completes once all lock
 * elements have been processed (or the request goes blocking).
 */
31 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
32 struct tevent_context *ev,
33 struct smbd_smb2_request *smb2req,
35 uint64_t in_file_id_volatile,
36 uint16_t in_lock_count,
37 struct smbd_smb2_lock_element *in_locks);
/* Collect the final NTSTATUS from a smbd_smb2_lock_send() request. */
38 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
/* tevent callback fired when the lock subrequest finishes. */
40 static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
/*
 * Parse and validate an incoming SMB2 LOCK request, pull the lock
 * elements out of the fixed body and the dynamic buffer, and hand them
 * to smbd_smb2_lock_send(). Sends an error response on malformed input,
 * otherwise queues the (possibly async) lock operation; the response is
 * generated later by smbd_smb2_request_lock_done().
 */
41 NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
44 const uint8_t *inbody;
45 const int i = req->current_idx;
/* Fixed SMB2 LOCK request body size: 0x30 (48) bytes. */
46 size_t expected_body_size = 0x30;
49 uint16_t in_lock_count;
50 uint64_t in_file_id_persistent;
51 uint64_t in_file_id_volatile;
52 struct smbd_smb2_lock_element *in_locks;
53 struct tevent_req *subreq;
54 const uint8_t *lock_buffer;
57 inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
/* Mask out the low "dynamic part follows" bit for the size check. */
58 if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
59 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
62 inbody = (const uint8_t *)req->in.vector[i+1].iov_base;
64 body_size = SVAL(inbody, 0x00);
65 if (body_size != expected_body_size) {
66 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
69 in_smbpid = IVAL(inhdr, SMB2_HDR_PID);
/*
 * NOTE(review): the LockCount field at offset 0x02 is 2 bytes on the
 * wire, but CVAL() reads a single byte — verify this should be SVAL().
 */
71 in_lock_count = CVAL(inbody, 0x02);
72 /* 0x04 - 4 bytes reserved */
73 in_file_id_persistent = BVAL(inbody, 0x08);
74 in_file_id_volatile = BVAL(inbody, 0x10);
/* At least one lock element is required. */
76 if (in_lock_count < 1) {
77 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/*
 * The first element lives in the fixed body; the remaining
 * (count - 1) elements, 0x18 (24) bytes each, must fit in the
 * dynamic buffer.
 */
80 if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
81 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
84 if (req->compat_chain_fsp) {
86 } else if (in_file_id_persistent != 0) {
87 return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
90 in_locks = talloc_array(req, struct smbd_smb2_lock_element,
92 if (in_locks == NULL) {
93 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
/* First lock element is embedded in the fixed request body... */
97 lock_buffer = inbody + 0x18;
99 in_locks[l].offset = BVAL(lock_buffer, 0x00);
100 in_locks[l].length = BVAL(lock_buffer, 0x08);
101 in_locks[l].flags = IVAL(lock_buffer, 0x10);
102 /* 0x14 - 4 reserved bytes */
/* ...any further elements follow in the dynamic buffer. */
104 lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;
106 for (l=1; l < in_lock_count; l++) {
107 in_locks[l].offset = BVAL(lock_buffer, 0x00);
108 in_locks[l].length = BVAL(lock_buffer, 0x08);
109 in_locks[l].flags = IVAL(lock_buffer, 0x10);
110 /* 0x14 - 4 reserved bytes */
115 subreq = smbd_smb2_lock_send(req,
116 req->sconn->smb2.event_ctx,
122 if (subreq == NULL) {
123 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
125 tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
/* Go async; the reply is sent from smbd_smb2_request_lock_done(). */
127 return smbd_smb2_request_pending_queue(req, subreq);
/*
 * Completion callback for the lock subrequest: build and send the SMB2
 * LOCK response (a 4-byte body), or an error response if the operation
 * failed or was cancelled. Any transport-level error while sending
 * terminates the whole connection.
 */
130 static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
132 struct smbd_smb2_request *req = tevent_req_callback_data(subreq,
133 struct smbd_smb2_request);
136 NTSTATUS error; /* transport error */
/* A cancelled request is answered with NT_STATUS_CANCELLED. */
138 if (req->cancelled) {
139 const uint8_t *inhdr = (const uint8_t *)
140 req->in.vector[req->current_idx].iov_base;
141 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
143 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
144 (unsigned long long)mid ));
145 error = smbd_smb2_request_error(req, NT_STATUS_CANCELLED);
146 if (!NT_STATUS_IS_OK(error)) {
147 smbd_server_connection_terminate(req->sconn,
154 status = smbd_smb2_lock_recv(subreq);
156 if (!NT_STATUS_IS_OK(status)) {
157 error = smbd_smb2_request_error(req, status);
158 if (!NT_STATUS_IS_OK(error)) {
159 smbd_server_connection_terminate(req->sconn,
/* Success: the LOCK response body is fixed at 4 bytes. */
166 outbody = data_blob_talloc(req->out.vector, NULL, 0x04);
167 if (outbody.data == NULL) {
168 error = smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
169 if (!NT_STATUS_IS_OK(error)) {
170 smbd_server_connection_terminate(req->sconn,
177 SSVAL(outbody.data, 0x00, 0x04); /* struct size */
178 SSVAL(outbody.data, 0x02, 0); /* reserved */
180 error = smbd_smb2_request_done(req, outbody, NULL);
181 if (!NT_STATUS_IS_OK(error)) {
182 smbd_server_connection_terminate(req->sconn,
/*
 * Per-request state for an in-flight SMB2 lock operation, carried by
 * the tevent_req created in smbd_smb2_lock_send().
 */
188 struct smbd_smb2_lock_state {
189 struct smbd_smb2_request *smb2req;
/* Fake SMB1 request used to drive the shared smbd locking backend. */
190 struct smb_request *smb1req;
/* Non-NULL while the request is blocked waiting for a lock. */
191 struct blocking_lock_record *blr;
/* Array of backend lock elements translated from the SMB2 request. */
193 struct smbd_lock_element *locks;
/*
 * Validate the SMB2 lock elements, translate them into the backend's
 * struct smbd_lock_element array and call smbd_do_locking(). Shared/
 * exclusive flags without FAIL_IMMEDIATELY make the request a blocking
 * lock (timeout -1, i.e. wait forever); UNLOCK elements go down the
 * unlock path. Flag validation follows the SMB2 rules: an element may
 * be SHARED or EXCLUSIVE (optionally with FAIL_IMMEDIATELY) or UNLOCK,
 * and lock and unlock elements must not be mixed in one request.
 */
196 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
197 struct tevent_context *ev,
198 struct smbd_smb2_request *smb2req,
200 uint64_t in_file_id_volatile,
201 uint16_t in_lock_count,
202 struct smbd_smb2_lock_element *in_locks)
204 struct tevent_req *req;
205 struct smbd_smb2_lock_state *state;
206 struct smb_request *smb1req;
207 connection_struct *conn = smb2req->tcon->compat_conn;
/* -1 == block forever; only used for non-FAIL_IMMEDIATELY locks. */
209 int32_t timeout = -1;
210 bool isunlock = false;
212 struct smbd_lock_element *locks;
216 req = tevent_req_create(mem_ctx, &state,
217 struct smbd_smb2_lock_state);
221 state->smb2req = smb2req;
/* The locking backend speaks SMB1, so fake an SMB1 request. */
222 smb1req = smbd_smb2_fake_smb_request(smb2req);
223 if (tevent_req_nomem(smb1req, req)) {
224 return tevent_req_post(req, ev);
226 state->smb1req = smb1req;
228 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
229 (unsigned long long)in_file_id_volatile));
/* Resolve the volatile file id to an open file handle. */
231 fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
233 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
234 return tevent_req_post(req, ev);
/* The handle must belong to this tree connection... */
236 if (conn != fsp->conn) {
237 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
238 return tevent_req_post(req, ev);
/* ...and to this session. */
240 if (smb2req->session->vuid != fsp->vuid) {
241 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
242 return tevent_req_post(req, ev);
245 locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
247 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
248 return tevent_req_post(req, ev);
/* The first element's flags decide the mode for the whole request. */
251 switch (in_locks[0].flags) {
252 case SMB2_LOCK_FLAG_SHARED:
253 case SMB2_LOCK_FLAG_EXCLUSIVE:
/* A blocking (wait-forever) lock may carry only one element. */
254 if (in_lock_count > 1) {
255 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
256 return tevent_req_post(req, ev);
261 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
262 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
266 case SMB2_LOCK_FLAG_UNLOCK:
267 /* only the first lock gives the UNLOCK bit - see
274 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
275 return tevent_req_post(req, ev);
/* Validate and translate each element. */
278 for (i=0; i<in_lock_count; i++) {
280 bool invalid = false;
282 switch (in_locks[i].flags) {
283 case SMB2_LOCK_FLAG_SHARED:
284 case SMB2_LOCK_FLAG_EXCLUSIVE:
286 tevent_req_nterror(req,
287 NT_STATUS_INVALID_PARAMETER);
288 return tevent_req_post(req, ev);
291 tevent_req_nterror(req,
292 NT_STATUS_INVALID_PARAMETER);
293 return tevent_req_post(req, ev);
297 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
298 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
300 tevent_req_nterror(req,
301 NT_STATUS_INVALID_PARAMETER);
302 return tevent_req_post(req, ev);
306 case SMB2_LOCK_FLAG_UNLOCK:
308 tevent_req_nterror(req,
309 NT_STATUS_INVALID_PARAMETER);
310 return tevent_req_post(req, ev);
317 * if the first element was an UNLOCK
318 * we need to defer the error response
319 * to the backend, because we need to process
320 * all unlock elements before
325 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
326 return tevent_req_post(req, ev);
329 locks[i].smbpid = in_smbpid;
330 locks[i].offset = in_locks[i].offset;
331 locks[i].count = in_locks[i].length;
333 if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
334 locks[i].brltype = WRITE_LOCK;
335 } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
336 locks[i].brltype = READ_LOCK;
337 } else if (invalid) {
339 * this is an invalid UNLOCK element
340 * and the backend needs to test for
341 * brltype != UNLOCK_LOCK and return
342 * NT_STATUS_INVALID_PARAMETER
344 locks[i].brltype = READ_LOCK;
346 locks[i].brltype = UNLOCK_LOCK;
/* Reject ranges that would wrap past the end of the 64-bit space. */
349 max_count = UINT64_MAX - locks[i].offset;
350 if (locks[i].count > max_count) {
351 tevent_req_nterror(req, NT_STATUS_INVALID_LOCK_RANGE);
352 return tevent_req_post(req, ev);
356 state->locks = locks;
357 state->lock_count = in_lock_count;
/* Unlock and lock requests take separate smbd_do_locking() calls. */
360 status = smbd_do_locking(smb1req, fsp,
369 status = smbd_do_locking(smb1req, fsp,
378 if (!NT_STATUS_IS_OK(status)) {
/* Map the backend's conflict status to the SMB2-visible one. */
379 if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
380 status = NT_STATUS_LOCK_NOT_GRANTED;
382 tevent_req_nterror(req, status);
383 return tevent_req_post(req, ev);
390 tevent_req_done(req);
391 return tevent_req_post(req, ev);
/*
 * Standard tevent receive function: return the request's final
 * NTSTATUS and release the tevent_req.
 */
394 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
398 if (tevent_req_is_nterror(req, &status)) {
399 tevent_req_received(req);
403 tevent_req_received(req);
407 /****************************************************************
408  Cancel an outstanding blocking lock request.
409 *****************************************************************/

411 static bool smbd_smb2_lock_cancel(struct tevent_req *req)
413 struct smbd_smb2_request *smb2req = NULL;
414 struct smbd_smb2_lock_state *state = tevent_req_data(req,
415 struct smbd_smb2_lock_state);
420 if (!state->smb2req) {
424 smb2req = state->smb2req;
/* Mark the request cancelled; smbd_smb2_request_lock_done() checks
 * this flag and answers with NT_STATUS_CANCELLED. */
425 smb2req->cancelled = true;
427 tevent_req_done(req);
431 /****************************************************************
432  Got a message saying someone unlocked a file. Re-schedule all
433  blocking lock requests as we don't know if anything overlapped.
434 *****************************************************************/

/* MSG_SMB_UNLOCK handler registered in push_blocking_lock_request_smb2(). */
436 static void received_unlock_msg(struct messaging_context *msg,
439 struct server_id server_id,
442 DEBUG(10,("received_unlock_msg (SMB2)\n"));
443 process_blocking_lock_queue_smb2();
446 /****************************************************************
447  Function to get the blr on a pending record.
448 *****************************************************************/

/*
 * Return the blocking_lock_record of an SMB2 request if it is a
 * still-in-progress LOCK call, or NULL otherwise.
 */
450 struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
452 struct smbd_smb2_lock_state *state = NULL;
453 const uint8_t *inhdr;
/* No subrequest or an already-finished one means nothing is pending. */
458 if (smb2req->subreq == NULL) {
461 if (!tevent_req_is_in_progress(smb2req->subreq)) {
/* Only LOCK calls carry a blocking lock record. */
464 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
465 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
468 state = tevent_req_data(smb2req->subreq,
469 struct smbd_smb2_lock_state);
475 /****************************************************************
476  Set up the next brl timeout.
477 *****************************************************************/

/*
 * Recalculate and (re)install the timed event that periodically
 * re-checks pending SMB2 blocking lock requests.
 * NOTE(review): the DEBUG messages below say
 * "push_blocking_lock_request_smb2" although they are emitted from
 * this function — looks like a copy/paste leftover; confirm upstream.
 */
479 static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
481 struct smbd_smb2_request *smb2req;
482 struct timeval next_timeout = timeval_zero();
483 int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);
486 * If we already have a timeout event, don't replace it.
487 * It will fire before this one anyway.
490 if (sconn->smb2.locks.brl_timeout) {
491 DEBUG(10,("recalc_smb2_brl_timeout: timeout already exists\n"));
/* Scan all pending LOCK requests for POSIX-lock waiters. */
495 for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
496 struct blocking_lock_record *blr =
497 get_pending_smb2req_blr(smb2req);
498 if (blr && blr->blocking_pid == 0xFFFFFFFF) {
500 * If we're blocked on pid 0xFFFFFFFF this is
501 * a POSIX lock, so calculate a timeout of
502 * 10 seconds into the future.
504 next_timeout = timeval_current_ofs(10, 0);
510 * To account for unclean shutdowns by clients we need a
511 * maximum timeout that we use for checking pending locks. If
512 * we have any pending locks at all, then check if the pending
513 * lock can continue at least every brl:recalctime seconds
514 * (default 5 seconds).
516 * This saves us needing to do a message_send_all() in the
517 * SIGCHLD handler in the parent daemon. That
518 * message_send_all() caused O(n^2) work to be done when IP
519 * failovers happened in clustered Samba, which could make the
520 * entire system unusable for many minutes.
523 if (max_brl_timeout > 0) {
524 struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
525 next_timeout = timeval_brl_min(&next_timeout, &min_to);
528 if (timeval_is_zero(&next_timeout)) {
529 /* Infinite timeout - return. */
530 DEBUG(10, ("push_blocking_lock_request_smb2: Next "
531 "timeout = INFINITY\n"));
536 struct timeval cur, from_now;
538 cur = timeval_current();
539 from_now = timeval_until(&cur, &next_timeout);
540 DEBUG(10, ("push_blocking_lock_request_smb2: Next "
541 "timeout = %d.%d seconds from now.\n",
542 (int)from_now.tv_sec, (int)from_now.tv_usec));
/* Install the timed event; failure is reported to the caller. */
545 sconn->smb2.locks.brl_timeout = event_add_timed(
546 smbd_event_context(),
551 if (!sconn->smb2.locks.brl_timeout) {
557 /****************************************************************
558  Get an SMB2 lock request to go async. lock_timeout should
560 *****************************************************************/

/*
 * Turn the current SMB2 LOCK subrequest into a blocking (async) lock:
 * record a blocking_lock_record on its state, add a PENDING lock to the
 * brl database, arm the recheck timeout, register for MSG_SMB_UNLOCK
 * wakeups and make the request cancellable.
 */
562 bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
563 struct smb_request *smb1req,
568 enum brl_type lock_type,
569 enum brl_flavour lock_flav,
572 uint32_t blocking_pid)
574 struct smbd_server_connection *sconn = smbd_server_conn;
575 struct smbd_smb2_request *smb2req = smb1req->smb2req;
576 struct tevent_req *req = NULL;
577 struct smbd_smb2_lock_state *state = NULL;
578 NTSTATUS status = NT_STATUS_OK;
/* SMB2 blocking locks always wait forever (timeout == -1). */
580 SMB_ASSERT(lock_timeout == -1);
585 req = smb2req->subreq;
589 state = tevent_req_data(req, struct smbd_smb2_lock_state);
/*
 * NOTE(review): blr appears to be assigned from talloc_zero() twice
 * (here and a few lines below); the first allocation looks unused —
 * confirm against the upstream source.
 */
595 struct blocking_lock_record *blr = talloc_zero(state,
596 struct blocking_lock_record);
600 blr = talloc_zero(state, struct blocking_lock_record);
602 blr->expire_time.tv_sec = 0;
603 blr->expire_time.tv_usec = 0; /* Never expire. */
604 blr->lock_num = lock_num;
605 blr->lock_pid = lock_pid;
606 blr->blocking_pid = blocking_pid;
607 blr->lock_flav = lock_flav;
608 blr->lock_type = lock_type;
609 blr->offset = offset;
612 /* Specific brl_lock() implementations can fill this in. */
613 blr->blr_private = NULL;
615 /* Add a pending lock record for this. */
616 status = brl_lock(smbd_messaging_context(),
622 lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
628 if (!NT_STATUS_IS_OK(status)) {
629 DEBUG(0,("push_blocking_lock_request_smb2: "
630 "failed to add PENDING_LOCK record.\n"));
/* Arm the periodic recheck for this pending lock. */
637 recalc_smb2_brl_timeout(sconn);
639 /* Ensure we'll receive messages when this is unlocked. */
640 if (!sconn->smb2.locks.blocking_lock_unlock_state) {
641 messaging_register(smbd_messaging_context(), NULL,
642 MSG_SMB_UNLOCK, received_unlock_msg);
643 sconn->smb2.locks.blocking_lock_unlock_state = true;
646 /* allow this request to be canceled */
647 tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
652 /****************************************************************
653  Re-process a blocking lock request.
654  This is equivalent to process_lockingX() inside smbd/blocking.c
655 *****************************************************************/

/*
 * Retry the remaining lock elements of a blocked SMB2 LOCK request.
 * Completes the subrequest on full success or on a hard error;
 * otherwise leaves the request pending and keeps waiting.
 */
657 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req)
660 struct blocking_lock_record *blr = NULL;
661 struct smbd_smb2_lock_state *state = NULL;
662 files_struct *fsp = NULL;
664 if (!smb2req->subreq) {
667 state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
675 /* Try and finish off getting all the outstanding locks. */
/* blr->lock_num records how far we got on the previous attempt. */
677 for (; blr->lock_num < state->lock_count; blr->lock_num++) {
678 struct byte_range_lock *br_lck = NULL;
679 struct smbd_lock_element *e = &state->locks[blr->lock_num];
681 br_lck = do_lock(smbd_messaging_context(),
695 if (NT_STATUS_IS_ERR(status)) {
700 if(blr->lock_num == state->lock_count) {
702 * Success - we got all the locks.
705 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
706 "fnum=%d num_locks=%d\n",
709 (int)state->lock_count));
711 tevent_req_done(smb2req->subreq);
715 if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
716 !NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
718 * We have other than a "can't get lock"
719 * error. Return an error.
721 tevent_req_nterror(smb2req->subreq, status);
726 * Still can't get all the locks - keep waiting.
729 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
730 "for file %s, fnum = %d. Waiting....\n",
732 (int)state->lock_count,
740 /****************************************************************
741  Attempt to process all outstanding blocking locks pending on
743 *****************************************************************/

745 void process_blocking_lock_queue_smb2(void)
747 struct smbd_server_connection *sconn = smbd_server_conn;
748 struct smbd_smb2_request *smb2req, *nextreq;
/* Grab next before reprocessing: the current entry may be removed. */
750 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
751 const uint8_t *inhdr;
753 nextreq = smb2req->next;
755 if (smb2req->subreq == NULL) {
756 /* This message has been processed. */
759 if (!tevent_req_is_in_progress(smb2req->subreq)) {
760 /* This message has been processed. */
/* Only retry pending LOCK calls. */
764 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
765 if (IVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
766 reprocess_blocked_smb2_lock(smb2req);
/* Re-arm the recheck timer for whatever is still pending. */
770 recalc_smb2_brl_timeout(sconn);
773 /****************************************************************************
774  Remove any locks on this fd. Called from file_close().
775 ****************************************************************************/

/*
 * Walk all pending SMB2 requests, find blocked LOCK calls that target
 * the file being closed, remove their pending brl entries and cancel
 * the subrequests.
 */
777 void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
778 struct byte_range_lock *br_lck)
780 struct smbd_server_connection *sconn = smbd_server_conn;
781 struct smbd_smb2_request *smb2req, *nextreq;
/* Grab next up front: cancelling may unlink the current entry. */
783 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
784 struct smbd_smb2_lock_state *state = NULL;
785 files_struct *fsp_curr = NULL;
786 int i = smb2req->current_idx;
787 uint64_t in_file_id_volatile;
788 struct blocking_lock_record *blr = NULL;
789 const uint8_t *inhdr;
790 const uint8_t *inbody;
792 nextreq = smb2req->next;
794 if (smb2req->subreq == NULL) {
795 /* This message has been processed. */
798 if (!tevent_req_is_in_progress(smb2req->subreq)) {
799 /* This message has been processed. */
803 inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
804 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
805 /* Not a lock call. */
/* Re-read the volatile file id from the request body. */
809 inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
810 in_file_id_volatile = BVAL(inbody, 0x10);
812 state = tevent_req_data(smb2req->subreq,
813 struct smbd_smb2_lock_state);
815 /* Strange - is this even possible ? */
819 fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
820 if (fsp_curr == NULL) {
821 /* Strange - is this even possible ? */
825 if (fsp_curr != fsp) {
826 /* It's not our fid */
832 /* Remove the entries from the lock db. */
833 brl_lock_cancel(br_lck,
841 /* Finally cancel the request. */
842 tevent_req_cancel(smb2req->subreq);