/*
   Unix SMB/CIFS implementation.
   Blocking Locking functions
   Copyright (C) Jeremy Allison 1998-2003

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/dbwrap/dbwrap_watch.h"
#include "librpc/gen_ndr/ndr_open_files.h"
#define DBGC_CLASS DBGC_LOCKING
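/*
 * Try to grab all requested byte range locks in one pass. If one of
 * them conflicts, undo the locks already granted and report the
 * conflicting element via blocker_idx, blocking_pid and
 * blocking_smblctx.
 */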
NTSTATUS smbd_do_locks_try(
	struct files_struct *fsp,
	enum brl_flavour lock_flav,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	struct server_id *blocking_pid,
	uint64_t *blocking_smblctx)
{
	NTSTATUS status = NT_STATUS_OK;
	uint16_t i;
	for (i=0; i<num_locks; i++) {
		struct smbd_lock_element *e = &locks[i];

			locks, /* req_mem_ctx */

		if (!NT_STATUS_IS_OK(status)) {

	if (NT_STATUS_IS_OK(status)) {

	/*
	 * Undo the locks we successfully got
	 */
	for (i = i-1; i != UINT16_MAX; i--) {
		struct smbd_lock_element *e = &locks[i];
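/*
 * Remember a blocked SMB1 lock request on its fsp by appending it to
 * the fsp->blocked_smb1_lock_reqs talloc array, so it can be retried,
 * completed or cancelled later.
 */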
static bool smbd_smb1_fsp_add_blocked_lock_req(
	struct files_struct *fsp, struct tevent_req *req)
{
	size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
	struct tevent_req **tmp = NULL;

	tmp = talloc_realloc(fsp,
			     fsp->blocked_smb1_lock_reqs,
			     struct tevent_req *,
			     num_reqs+1);
	if (tmp == NULL) {
		return false;
	}
	fsp->blocked_smb1_lock_reqs = tmp;
	fsp->blocked_smb1_lock_reqs[num_reqs] = req;
	return true;
}
struct smbd_smb1_do_locks_state {
	struct tevent_context *ev;
	struct smb_request *smbreq;
	struct files_struct *fsp;
	uint32_t timeout;
	uint32_t polling_msecs;
	uint32_t retry_msecs;
	struct timeval endtime;
	bool large_offset;	/* required for correct cancel */
	enum brl_flavour lock_flav;
	uint16_t num_locks;
	struct smbd_lock_element *locks;
	uint16_t blocker;
	NTSTATUS deny_status;
};
static void smbd_smb1_do_locks_try(struct tevent_req *req);
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
static void smbd_smb1_blocked_locks_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state);
static NTSTATUS smbd_smb1_do_locks_check(
	struct files_struct *fsp,
	enum brl_flavour lock_flav,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	struct server_id *blocking_pid,
	uint64_t *blocking_smblctx);
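/*
 * Derive state->endtime from the arrival time of the request and the
 * client supplied timeout, applying the adjustments below (minimum
 * spin time, special-cased offsets, repeated failures on the same
 * offset).
 */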
static void smbd_smb1_do_locks_setup_timeout(
	struct smbd_smb1_do_locks_state *state,
	const struct smbd_lock_element *blocker)
{
	struct files_struct *fsp = state->fsp;

	if (!timeval_is_zero(&state->endtime)) {
		return;
	}

	if ((state->timeout != 0) && (state->timeout != UINT32_MAX)) {
		/*
		 * Windows internal resolution for blocking locks
		 * seems to be about 200ms... Don't wait for less than
		 * that.
		 */
		state->timeout = MAX(state->timeout, lp_lock_spin_time());
	}

	if (state->timeout != 0) {
		goto setup_endtime;
	}

	if (blocker == NULL) {
		goto setup_endtime;
	}

	if ((blocker->offset >= 0xEF000000) &&
	    ((blocker->offset >> 63) == 0)) {
		/*
		 * This must be an optimization of an ancient
		 * application bug.
		 */
		state->timeout = lp_lock_spin_time();
	}

	if (fsp->fsp_flags.lock_failure_seen &&
	    (blocker->offset == fsp->lock_failure_offset)) {
		/*
		 * Delay repeated lock attempts on the same
		 * lock. Maybe a more advanced version of the
		 * above check?
		 */
		DBG_DEBUG("Delaying lock request due to previous "
			  "failure\n");
		state->timeout = lp_lock_spin_time();
	}

setup_endtime:
	/*
	 * Note state->timeout might still be 0,
	 * but that's ok, as we don't want to retry
	 * in that case.
	 */
	state->endtime = timeval_add(&state->smbreq->request_time,
				     state->timeout / 1000,
				     (state->timeout % 1000) * 1000);
}
static void smbd_smb1_do_locks_update_retry_msecs(
	struct smbd_smb1_do_locks_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms,
	 * we just use half of it to trigger the first retry.
	 *
	 * v_min is in the range of 0.001 to 10 secs
	 * (0.1 secs by default)
	 *
	 * v_max is in the range of 0.01 to 100 secs
	 * (1.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.1, 0.2, 0.3, 0.4, ... 1.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
	uint32_t v_max = 10 * v_min;

	if (state->retry_msecs >= v_max) {
		state->retry_msecs = v_max;
		return;
	}

	state->retry_msecs += v_min;
}
static void smbd_smb1_do_locks_update_polling_msecs(
	struct smbd_smb1_do_locks_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms.
	 *
	 * v_min is in the range of 0.002 to 20 secs
	 * (0.2 secs by default)
	 *
	 * v_max is in the range of 0.02 to 200 secs
	 * (2.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.2, 0.4, 0.6, 0.8, ... 2.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
	uint32_t v_max = 10 * v_min;

	if (state->polling_msecs >= v_max) {
		state->polling_msecs = v_max;
		return;
	}

	state->polling_msecs += v_min;
}
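/*
 * Kick off a (potentially blocking) SMB1 byte range lock request. The
 * smb_request is talloc_move()d into our state; if the locks cannot be
 * granted immediately the request is parked on
 * fsp->blocked_smb1_lock_reqs and retried when locking.tdb changes or
 * a timer fires.
 *
 * Illustrative caller sketch (hypothetical callback and variable
 * names, following the usual tevent send/recv convention):
 *
 *	req = smbd_smb1_do_locks_send(mem_ctx, ev, &smbreq, fsp, ...);
 *	if (req == NULL) {
 *		... handle out of memory ...
 *	}
 *	tevent_req_set_callback(req, my_locking_done, my_state);
 *
 * and later, in my_locking_done():
 *
 *	status = smbd_smb1_do_locks_recv(req);
 *	TALLOC_FREE(req);
 */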
struct tevent_req *smbd_smb1_do_locks_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct smb_request **smbreq, /* talloc_move()d into our state */
	struct files_struct *fsp,
	uint32_t lock_timeout,
	bool large_offset,
	enum brl_flavour lock_flav,
	uint16_t num_locks,
	struct smbd_lock_element *locks)
{
	struct tevent_req *req = NULL;
	struct smbd_smb1_do_locks_state *state = NULL;
	bool ok;

	req = tevent_req_create(
		mem_ctx, &state, struct smbd_smb1_do_locks_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->smbreq = talloc_move(state, smbreq);
	state->fsp = fsp;
	state->timeout = lock_timeout;
	state->large_offset = large_offset;
	state->lock_flav = lock_flav;
	state->num_locks = num_locks;
	state->locks = locks;

	if (lock_flav == POSIX_LOCK) {
		/*
		 * SMB1 posix locks always use
		 * NT_STATUS_FILE_LOCK_CONFLICT.
		 */
		state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
	} else {
		state->deny_status = NT_STATUS_LOCK_NOT_GRANTED;
	}

	DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);

	if (num_locks == 0) {
		DBG_DEBUG("no locks\n");
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	smbd_smb1_do_locks_try(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);

	return req;
}
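/*
 * tevent cleanup hook: unlink the request from
 * fsp->blocked_smb1_lock_reqs once it is no longer pending.
 */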
static void smbd_smb1_blocked_locks_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	struct files_struct *fsp = state->fsp;
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	size_t i;

	DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
		  req, state, (int)req_state);

	if (req_state == TEVENT_REQ_RECEIVED) {
		DBG_DEBUG("already received\n");
		return;
	}

	for (i=0; i<num_blocked; i++) {
		if (blocked[i] == req) {
			break;
		}
	}
	SMB_ASSERT(i<num_blocked);

	ARRAY_DEL_ELEMENT(blocked, i, num_blocked);

	fsp->blocked_smb1_lock_reqs = talloc_realloc(
		fsp, blocked, struct tevent_req *, num_blocked-1);
}
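/*
 * Check whether any of the locks we are asking for conflicts with a
 * lock element of an earlier pending (blocked) request, so that
 * blocked requests are serviced in the order they arrived.
 */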
static NTSTATUS smbd_smb1_do_locks_check_blocked(
	uint16_t num_blocked,
	struct smbd_lock_element *blocked,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	uint64_t *blocking_smblctx)
{
	uint16_t li;

	for (li=0; li < num_locks; li++) {
		struct smbd_lock_element *l = &locks[li];
		uint16_t bi;
		bool valid;

		valid = byte_range_valid(l->offset, l->count);
		if (!valid) {
			return NT_STATUS_INVALID_LOCK_RANGE;
		}

		for (bi = 0; bi < num_blocked; bi++) {
			struct smbd_lock_element *b = &blocked[bi];
			bool overlap;

			/* Read locks never conflict. */
			if (l->brltype == READ_LOCK && b->brltype == READ_LOCK) {
				continue;
			}

			overlap = byte_range_overlap(l->offset,
						     l->count,
						     b->offset,
						     b->count);
			if (!overlap) {
				continue;
			}

			*blocker_idx = li;
			*blocking_smblctx = b->smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
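/*
 * Decide whether this request may be granted now: first make sure it
 * would not overtake an older pending request on the same fsp, then
 * try the locks themselves via smbd_do_locks_try().
 */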
static NTSTATUS smbd_smb1_do_locks_check(
	struct files_struct *fsp,
	enum brl_flavour lock_flav,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	struct server_id *blocking_pid,
	uint64_t *blocking_smblctx)
{
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	NTSTATUS status;
	size_t bi;

	/*
	 * We check the pending/blocked requests
	 * from the oldest to the youngest request.
	 *
	 * Note that due to the retry logic the current request
	 * might already be in the list.
	 */
	for (bi = 0; bi < num_blocked; bi++) {
		struct smbd_smb1_do_locks_state *blocked_state =
			tevent_req_data(blocked[bi],
			struct smbd_smb1_do_locks_state);

		if (blocked_state->locks == locks) {
			SMB_ASSERT(blocked_state->num_locks == num_locks);
			SMB_ASSERT(blocked_state->lock_flav == lock_flav);

			/*
			 * We found ourselves...
			 */
			break;
		}

		status = smbd_smb1_do_locks_check_blocked(
			blocked_state->num_locks,
			blocked_state->locks,
			num_locks,
			locks,
			blocker_idx,
			blocking_smblctx);
		if (!NT_STATUS_IS_OK(status)) {
			*blocking_pid = messaging_server_id(
				fsp->conn->sconn->msg_ctx);
			return status;
		}
	}

	status = smbd_do_locks_try(
		fsp,
		lock_flav,
		num_locks,
		locks,
		blocker_idx,
		blocking_pid,
		blocking_smblctx);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	return NT_STATUS_OK;
}
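/*
 * One attempt to satisfy a blocked request: grab the share mode lock,
 * run the checks above and either finish the request or arrange the
 * next wakeup via a dbwrap watch on locking.tdb and/or a timer.
 */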
static void smbd_smb1_do_locks_try(struct tevent_req *req)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	struct files_struct *fsp = state->fsp;
	struct share_mode_lock *lck;
	struct timeval endtime = { 0 };
	struct server_id blocking_pid = { 0 };
	uint64_t blocking_smblctx = 0;
	struct tevent_req *subreq = NULL;
	NTSTATUS status;
	bool ok;
	bool expired;

	lck = get_existing_share_mode_lock(state, fsp->file_id);
	if (tevent_req_nomem(lck, req)) {
		DBG_DEBUG("Could not get share mode lock\n");
		return;
	}

	status = smbd_smb1_do_locks_check(
		fsp,
		state->lock_flav,
		state->num_locks,
		state->locks,
		&state->blocker,
		&blocking_pid,
		&blocking_smblctx);
	if (NT_STATUS_IS_OK(status)) {
		goto done;
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got NT_STATUS_RETRY,
		 * we reset polling_msecs so that
		 * the retries based on LOCK_NOT_GRANTED
		 * will later start with small intervals again.
		 */
		state->polling_msecs = 0;

		/*
		 * The backend wasn't able to decide yet.
		 * We need to wait even for non-blocking
		 * locks.
		 *
		 * The backend uses blocking_smblctx == UINT64_MAX
		 * to indicate that we should use retry timers.
		 *
		 * It uses blocking_smblctx == 0 to indicate
		 * it will use share_mode_wakeup_waiters()
		 * to wake us. Note that unrelated changes in
		 * locking.tdb may cause retries.
		 */

		if (blocking_smblctx != UINT64_MAX) {
			SMB_ASSERT(blocking_smblctx == 0);
			goto setup_retry;
		}

		smbd_smb1_do_locks_update_retry_msecs(state);

		DBG_DEBUG("Waiting for a backend decision. "
			  "Retry in %"PRIu32" msecs\n",
			  state->retry_msecs);

		/*
		 * We completely ignore state->endtime here,
		 * as we'll wait for a backend decision forever.
		 * If the backend is smart enough to implement
		 * some NT_STATUS_RETRY logic, it has to
		 * switch to some other status later in order
		 * to avoid waiting forever.
		 */
		endtime = timeval_current_ofs_msec(state->retry_msecs);
		goto setup_retry;
	}
	if (!ERROR_WAS_LOCK_DENIED(status)) {
		goto done;
	}

	/*
	 * We got LOCK_NOT_GRANTED, make sure
	 * a following STATUS_RETRY will start
	 * with short intervals again.
	 */
	state->retry_msecs = 0;

	smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
	DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
		  state->timeout,
		  blocking_smblctx);

	/*
	 * If the client-specified timeout expired,
	 * avoid further retries.
	 *
	 * Otherwise keep waiting, either for changes
	 * in locking.tdb or for the polling mode
	 * timers waiting for posix locks.
	 *
	 * If the endtime is not expired yet,
	 * it means we'll retry after a timeout.
	 * In that case we'll have to return
	 * NT_STATUS_FILE_LOCK_CONFLICT
	 * instead of NT_STATUS_LOCK_NOT_GRANTED.
	 */
	expired = timeval_expired(&state->endtime);
	if (expired) {
		status = state->deny_status;
		goto done;
	}

	state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;

	endtime = state->endtime;

	if (blocking_smblctx == UINT64_MAX) {
		struct timeval tmp;

		smbd_smb1_do_locks_update_polling_msecs(state);

		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
			  state->polling_msecs);

		tmp = timeval_current_ofs_msec(state->polling_msecs);
		endtime = timeval_min(&endtime, &tmp);
	}

setup_retry:
	subreq = share_mode_watch_send(
		state, state->ev, lck->data->id, blocking_pid);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);

	if (timeval_is_zero(&endtime)) {
		return;
	}

	ok = tevent_req_set_endtime(subreq, state->ev, endtime);
	if (!ok) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	return;
done:
	smbd_smb1_brl_finish_by_req(req, status);
}
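/*
 * Callback of the share mode watch / timer: re-impersonate the user
 * that issued the request and try the locks again.
 */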
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	NTSTATUS status;
	bool ok;

	/*
	 * Make sure we run as the user again
	 */
	ok = change_to_user_and_service_by_fsp(state->fsp);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
		return;
	}

	status = share_mode_watch_recv(subreq, NULL, NULL);
	TALLOC_FREE(subreq);

	DBG_DEBUG("share_mode_watch_recv returned %s\n",
		  nt_errstr(status));

	/*
	 * We ignore any errors here, it's most likely
	 * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
	 *
	 * In any case we can just give it a retry.
	 */
	smbd_smb1_do_locks_try(req);
}
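/*
 * Collect the result of a lock request. On failure remember the
 * blocking offset on the fsp so that an immediate retry of the same
 * lock is delayed (see smbd_smb1_do_locks_setup_timeout()).
 */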
NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	NTSTATUS status = NT_STATUS_OK;
	bool err;

	err = tevent_req_is_nterror(req, &status);

	DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));

	if (tevent_req_is_nterror(req, &status)) {
		struct files_struct *fsp = state->fsp;
		struct smbd_lock_element *blocker =
			&state->locks[state->blocker];

		DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
			  blocker->offset);

		fsp->fsp_flags.lock_failure_seen = true;
		fsp->lock_failure_offset = blocker->offset;
	}

	tevent_req_received(req);

	return status;
}
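/*
 * Hand the parked smb_request back to the caller (e.g. for cancel
 * processing); returns false if it has already been taken.
 */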
bool smbd_smb1_do_locks_extract_smbreq(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	struct smb_request **psmbreq)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);

	DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
		  req, state, state->smbreq);

	if (state->smbreq == NULL) {
		return false;
	}
	*psmbreq = talloc_move(mem_ctx, &state->smbreq);
	return true;
}
void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
{
	DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));

	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
}
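/*
 * Complete a pending blocked request whose lock set contains an
 * element matching the given smblctx/offset/count, finishing it with
 * finish_status.
 */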
bool smbd_smb1_brl_finish_by_lock(
	struct files_struct *fsp,
	bool large_offset,
	enum brl_flavour lock_flav,
	struct smbd_lock_element lock,
	NTSTATUS finish_status)
{
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	size_t i;

	DBG_DEBUG("num_blocked=%zu\n", num_blocked);

	for (i=0; i<num_blocked; i++) {
		struct tevent_req *req = blocked[i];
		struct smbd_smb1_do_locks_state *state = tevent_req_data(
			req, struct smbd_smb1_do_locks_state);
		uint16_t j;

		DBG_DEBUG("i=%zu, req=%p\n", i, req);

		if ((state->large_offset != large_offset) ||
		    (state->lock_flav != lock_flav)) {
			continue;
		}

		for (j=0; j<state->num_locks; j++) {
			struct smbd_lock_element *l = &state->locks[j];

			if ((lock.smblctx == l->smblctx) &&
			    (lock.offset == l->offset) &&
			    (lock.count == l->count)) {
				smbd_smb1_brl_finish_by_req(
					req, finish_status);
				return true;
			}
		}
	}

	return false;
}
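/*
 * files_forall() callback: fail the blocked request whose SMB1 mid
 * matches with NT_STATUS_FILE_LOCK_CONFLICT.
 */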
static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
	struct files_struct *fsp, void *private_data)
{
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	uint64_t mid = *((uint64_t *)private_data);
	size_t i;

	DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);

	for (i=0; i<num_blocked; i++) {
		struct tevent_req *req = blocked[i];
		struct smbd_smb1_do_locks_state *state = tevent_req_data(
			req, struct smbd_smb1_do_locks_state);
		struct smb_request *smbreq = state->smbreq;

		if (smbreq->mid == mid) {
			tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
			return fsp;
		}
	}

	return NULL;
}
/*
 * This walks the list of fsps to which we attach the blocked requests.
 * It can be expensive, but this is legacy SMB1 and, from looking at
 * traces, I don't recall seeing many of these calls.
 */
bool smbd_smb1_brl_finish_by_mid(
	struct smbd_server_connection *sconn, uint64_t mid)
{
	struct files_struct *found = files_forall(
		sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);

	return (found != NULL);
}