2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
6 Copyright (C) Jeremy Allison 2010
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "smbd/smbd.h"
24 #include "smbd/globals.h"
25 #include "../libcli/smb/smb_common.h"
26 #include "librpc/gen_ndr/messaging.h"
/*
 * One lock element as unmarshalled from an SMB2 LOCK request.
 * NOTE(review): this extract is lossy — the member declarations of this
 * struct (offset/length/flags, per the parsing code below) are missing
 * here; confirm against the upstream file.
 */
28 struct smbd_smb2_lock_element {
/*
 * Per-request state for one SMB2 LOCK operation, shared between the
 * synchronous path and the blocking (async) path.
 */
34 struct smbd_smb2_lock_state {
35 struct smbd_smb2_request *smb2req;
36 struct smb_request *smb1req; /* fake SMB1 request for the shared locking code */
37 struct blocking_lock_record *blr; /* non-NULL while blocked waiting for a lock */
39 struct smbd_lock_element *locks; /* translated array handed to the brlock layer */
/* Forward declarations for the static helpers defined later in this file. */
42 static void remove_pending_lock(struct smbd_smb2_lock_state *state,
43 struct blocking_lock_record *blr);
45 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
46 struct tevent_context *ev,
47 struct smbd_smb2_request *smb2req,
49 uint64_t in_file_id_volatile,
50 uint16_t in_lock_count,
51 struct smbd_smb2_lock_element *in_locks);
52 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
54 static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
/*
 * Parse and dispatch an incoming SMB2 LOCK request.
 *
 * Validates the fixed body size (0x30), pulls the lock count and the
 * persistent/volatile file ids out of the body, unmarshalls the lock
 * element array (the first element is embedded in the fixed body at
 * offset 0x18, the rest live in the dynamic buffer), then starts the
 * async lock via smbd_smb2_lock_send().  Protocol-level failures are
 * mapped through smbd_smb2_request_error(); the return value is the
 * transport-level NTSTATUS.
 */
55 NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
58 const uint8_t *inbody;
59 const int i = req->current_idx;
60 size_t expected_body_size = 0x30;
63 uint16_t in_lock_count;
64 uint64_t in_file_id_persistent;
65 uint64_t in_file_id_volatile;
66 struct smbd_smb2_lock_element *in_locks;
67 struct tevent_req *subreq;
68 const uint8_t *lock_buffer;
71 inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
/* The low bit of the declared body size flags a dynamic part; mask it off. */
72 if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
73 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
76 inbody = (const uint8_t *)req->in.vector[i+1].iov_base;
78 body_size = SVAL(inbody, 0x00);
79 if (body_size != expected_body_size) {
80 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
83 in_smbpid = IVAL(inhdr, SMB2_HDR_PID);
85 in_lock_count = CVAL(inbody, 0x02);
86 /* 0x04 - 4 bytes reserved */
87 in_file_id_persistent = BVAL(inbody, 0x08);
88 in_file_id_volatile = BVAL(inbody, 0x10);
/* At least one lock element is required. */
90 if (in_lock_count < 1) {
91 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* All elements beyond the first (0x18 bytes each) must fit in the dynamic buffer. */
94 if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
95 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* A compounded request reuses the fsp from the previous request in the chain. */
98 if (req->compat_chain_fsp) {
/* NOTE(review): durable handles are not handled here — the persistent id
 * must equal the volatile id or the handle is treated as closed. */
100 } else if (in_file_id_persistent != in_file_id_volatile) {
101 return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
104 in_locks = talloc_array(req, struct smbd_smb2_lock_element,
106 if (in_locks == NULL) {
107 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
/* First lock element sits in the fixed body at offset 0x18. */
111 lock_buffer = inbody + 0x18;
113 in_locks[l].offset = BVAL(lock_buffer, 0x00);
114 in_locks[l].length = BVAL(lock_buffer, 0x08);
115 in_locks[l].flags = IVAL(lock_buffer, 0x10);
116 /* 0x14 - 4 reserved bytes */
/* Remaining elements come from the dynamic buffer. */
118 lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;
120 for (l=1; l < in_lock_count; l++) {
121 in_locks[l].offset = BVAL(lock_buffer, 0x00);
122 in_locks[l].length = BVAL(lock_buffer, 0x08);
123 in_locks[l].flags = IVAL(lock_buffer, 0x10);
124 /* 0x14 - 4 reserved bytes */
129 subreq = smbd_smb2_lock_send(req,
130 req->sconn->smb2.event_ctx,
136 if (subreq == NULL) {
137 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
139 tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
/* May go async; queue an interim response if the subreq is still pending. */
141 return smbd_smb2_request_pending_queue(req, subreq);
/*
 * Completion callback for smbd_smb2_lock_send().
 *
 * If the request was cancelled, removes the pending blocking-lock
 * record and replies with NT_STATUS_CANCELLED.  Otherwise collects the
 * result via smbd_smb2_lock_recv() and, on success, sends the fixed
 * 4-byte SMB2 LOCK response body.  Any transport-level failure while
 * replying terminates the whole server connection.
 */
144 static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
146 struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
147 struct smbd_smb2_request);
150 NTSTATUS error; /* transport error */
152 if (smb2req->cancelled) {
153 const uint8_t *inhdr = (const uint8_t *)
154 smb2req->in.vector[smb2req->current_idx].iov_base;
155 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
156 struct smbd_smb2_lock_state *state;
158 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
159 (unsigned long long)mid ));
161 state = tevent_req_data(smb2req->subreq,
162 struct smbd_smb2_lock_state);
/* A cancelled lock request must have a pending blocking lock record. */
165 SMB_ASSERT(state->blr);
167 remove_pending_lock(state, state->blr);
169 error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
170 if (!NT_STATUS_IS_OK(error)) {
171 smbd_server_connection_terminate(smb2req->sconn,
178 status = smbd_smb2_lock_recv(subreq);
180 if (!NT_STATUS_IS_OK(status)) {
181 error = smbd_smb2_request_error(smb2req, status);
182 if (!NT_STATUS_IS_OK(error)) {
183 smbd_server_connection_terminate(smb2req->sconn,
/* Success: build the fixed 4-byte LOCK response body. */
190 outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
191 if (outbody.data == NULL) {
192 error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
193 if (!NT_STATUS_IS_OK(error)) {
194 smbd_server_connection_terminate(smb2req->sconn,
201 SSVAL(outbody.data, 0x00, 0x04); /* struct size */
202 SSVAL(outbody.data, 0x02, 0); /* reserved */
204 error = smbd_smb2_request_done(smb2req, outbody, NULL);
205 if (!NT_STATUS_IS_OK(error)) {
206 smbd_server_connection_terminate(smb2req->sconn,
/*
 * Start an SMB2 LOCK/UNLOCK operation as a tevent request.
 *
 * Resolves the fsp from the volatile file id (checking connection and
 * session ownership), validates the per-element flag combinations per
 * the SMB2 rules, translates the SMB2 lock elements into
 * struct smbd_lock_element for the shared smbd_do_locking() backend,
 * and completes (or posts an error on) the returned tevent_req.  May go
 * async via the blocking-lock machinery further down this file.
 */
212 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
213 struct tevent_context *ev,
214 struct smbd_smb2_request *smb2req,
216 uint64_t in_file_id_volatile,
217 uint16_t in_lock_count,
218 struct smbd_smb2_lock_element *in_locks)
220 struct tevent_req *req;
221 struct smbd_smb2_lock_state *state;
222 struct smb_request *smb1req;
223 connection_struct *conn = smb2req->tcon->compat_conn;
225 int32_t timeout = -1; /* -1: block indefinitely unless FAIL_IMMEDIATELY is set */
226 bool isunlock = false;
228 struct smbd_lock_element *locks;
232 req = tevent_req_create(mem_ctx, &state,
233 struct smbd_smb2_lock_state);
237 state->smb2req = smb2req;
238 smb2req->subreq = req; /* So we can find this when going async. */
/* Build a fake SMB1 request so the shared SMB1 locking code can be reused. */
240 smb1req = smbd_smb2_fake_smb_request(smb2req);
241 if (tevent_req_nomem(smb1req, req)) {
242 return tevent_req_post(req, ev);
244 state->smb1req = smb1req;
246 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
247 (unsigned long long)in_file_id_volatile));
249 fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
251 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
252 return tevent_req_post(req, ev);
/* The handle must belong to this tree connection... */
254 if (conn != fsp->conn) {
255 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
256 return tevent_req_post(req, ev);
/* ...and to this session. */
258 if (smb2req->session->vuid != fsp->vuid) {
259 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
260 return tevent_req_post(req, ev);
263 locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
265 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
266 return tevent_req_post(req, ev);
/* The first element's flags determine the mode for the whole batch. */
269 switch (in_locks[0].flags) {
270 case SMB2_LOCK_FLAG_SHARED:
271 case SMB2_LOCK_FLAG_EXCLUSIVE:
/* A blocking (no FAIL_IMMEDIATELY) request may carry only one element. */
272 if (in_lock_count > 1) {
273 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
274 return tevent_req_post(req, ev);
279 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
280 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
284 case SMB2_LOCK_FLAG_UNLOCK:
285 /* only the first lock gives the UNLOCK bit - see
292 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
293 return tevent_req_post(req, ev);
/* Validate and translate each element. */
296 for (i=0; i<in_lock_count; i++) {
297 bool invalid = false;
299 switch (in_locks[i].flags) {
300 case SMB2_LOCK_FLAG_SHARED:
301 case SMB2_LOCK_FLAG_EXCLUSIVE:
307 tevent_req_nterror(req,
308 NT_STATUS_INVALID_PARAMETER);
309 return tevent_req_post(req, ev);
313 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
314 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
320 case SMB2_LOCK_FLAG_UNLOCK:
322 tevent_req_nterror(req,
323 NT_STATUS_INVALID_PARAMETER);
324 return tevent_req_post(req, ev);
331 * if the first element was an UNLOCK
332 * we need to defer the error response
333 * to the backend, because we need to process
334 * all unlock elements before
339 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
340 return tevent_req_post(req, ev);
/* Use the volatile file id as the lock context for this handle. */
343 locks[i].smblctx = in_file_id_volatile;
344 locks[i].offset = in_locks[i].offset;
345 locks[i].count = in_locks[i].length;
347 if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
348 locks[i].brltype = WRITE_LOCK;
349 } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
350 locks[i].brltype = READ_LOCK;
351 } else if (invalid) {
353 * this is an invalid UNLOCK element
354 * and the backend needs to test for
355 * brltype != UNLOCK_LOCK and return
356 * NT_STATUS_INVALID_PARAMETER
358 locks[i].brltype = READ_LOCK;
360 locks[i].brltype = UNLOCK_LOCK;
363 DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
364 "smblctx = %llu type %d\n",
366 (unsigned long long)locks[i].offset,
367 (unsigned long long)locks[i].count,
368 (unsigned long long)locks[i].smblctx,
369 (int)locks[i].brltype ));
372 state->locks = locks;
373 state->lock_count = in_lock_count;
/* Hand the translated array to the shared SMB1/SMB2 locking backend. */
376 status = smbd_do_locking(smb1req, fsp,
385 status = smbd_do_locking(smb1req, fsp,
394 if (!NT_STATUS_IS_OK(status)) {
/* Map the generic conflict status to the SMB2-level error. */
395 if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
396 status = NT_STATUS_LOCK_NOT_GRANTED;
398 tevent_req_nterror(req, status);
399 return tevent_req_post(req, ev);
406 tevent_req_done(req);
407 return tevent_req_post(req, ev);
/*
 * Collect the result of a request started by smbd_smb2_lock_send().
 * Standard tevent_req receive function: returns the stored NTSTATUS
 * and releases the request.
 */
410 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
414 if (tevent_req_is_nterror(req, &status)) {
415 tevent_req_received(req);
419 tevent_req_received(req);
423 /****************************************************************
424 Cancel an outstanding blocking lock request.
425 *****************************************************************/
427 static bool smbd_smb2_lock_cancel(struct tevent_req *req)
429 struct smbd_smb2_request *smb2req = NULL;
430 struct smbd_smb2_lock_state *state = tevent_req_data(req,
431 struct smbd_smb2_lock_state);
436 if (!state->smb2req) {
440 smb2req = state->smb2req;
/* Flag the cancel; smbd_smb2_request_lock_done() checks this flag and
 * replies with NT_STATUS_CANCELLED after removing the pending lock. */
441 smb2req->cancelled = true;
443 tevent_req_done(req);
447 /****************************************************************
448 Got a message saying someone unlocked a file. Re-schedule all
449 blocking lock requests as we don't know if anything overlapped.
450 *****************************************************************/
452 static void received_unlock_msg(struct messaging_context *msg,
455 struct server_id server_id,
458 struct smbd_server_connection *sconn;
460 DEBUG(10,("received_unlock_msg (SMB2)\n"));
462 sconn = msg_ctx_to_sconn(msg);
464 DEBUG(1, ("could not find sconn\n"));
/* Retry every pending SMB2 blocking lock now that something was unlocked. */
467 process_blocking_lock_queue_smb2(sconn, timeval_current());
470 /****************************************************************
471 Function to get the blr on a pending record.
472 *****************************************************************/
474 struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
476 struct smbd_smb2_lock_state *state = NULL;
477 const uint8_t *inhdr;
/* No subreq means the request has already been fully processed. */
482 if (smb2req->subreq == NULL) {
485 if (!tevent_req_is_in_progress(smb2req->subreq)) {
/* Only LOCK requests can carry a blocking lock record. */
488 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
489 if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
492 state = tevent_req_data(smb2req->subreq,
493 struct smbd_smb2_lock_state);
499 /****************************************************************
500 Set up the next brl timeout.
501 *****************************************************************/
503 static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
505 struct smbd_smb2_request *smb2req;
506 struct timeval next_timeout = timeval_zero();
507 int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);
/* Drop any previously scheduled timer before computing a new one. */
509 TALLOC_FREE(sconn->smb2.locks.brl_timeout);
/* Find the earliest expiry across all pending SMB2 blocking lock requests. */
511 for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
512 struct blocking_lock_record *blr =
513 get_pending_smb2req_blr(smb2req);
517 if (timeval_is_zero(&blr->expire_time)) {
519 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
520 * a POSIX lock, so calculate a timeout of
521 * 10 seconds into the future.
523 if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
524 struct timeval psx_to = timeval_current_ofs(10, 0);
525 next_timeout = timeval_brl_min(&next_timeout, &psx_to);
531 next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
/* Nothing expires: no timer needed. */
534 if (timeval_is_zero(&next_timeout)) {
535 DEBUG(10, ("recalc_smb2_brl_timeout:Next "
536 "timeout = Infinite.\n"));
541 * To account for unclean shutdowns by clients we need a
542 * maximum timeout that we use for checking pending locks. If
543 * we have any pending locks at all, then check if the pending
544 * lock can continue at least every brl:recalctime seconds
545 * (default 5 seconds).
547 * This saves us needing to do a message_send_all() in the
548 * SIGCHLD handler in the parent daemon. That
549 * message_send_all() caused O(n^2) work to be done when IP
550 * failovers happened in clustered Samba, which could make the
551 * entire system unusable for many minutes.
554 if (max_brl_timeout > 0) {
555 struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
556 next_timeout = timeval_brl_min(&next_timeout, &min_to);
560 struct timeval cur, from_now;
562 cur = timeval_current();
563 from_now = timeval_until(&cur, &next_timeout);
564 DEBUG(10, ("recalc_smb2_brl_timeout: Next "
565 "timeout = %d.%d seconds from now.\n",
566 (int)from_now.tv_sec, (int)from_now.tv_usec));
/* Schedule the wakeup; failure to create the timer is reported to the caller. */
569 sconn->smb2.locks.brl_timeout = event_add_timed(
570 smbd_event_context(),
575 if (!sconn->smb2.locks.brl_timeout) {
581 /****************************************************************
582 Get an SMB2 lock request to go async. lock_timeout should
584 *****************************************************************/
586 bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
587 struct smb_request *smb1req,
592 enum brl_type lock_type,
593 enum brl_flavour lock_flav,
596 uint64_t blocking_smblctx)
598 struct smbd_server_connection *sconn = smb1req->sconn;
599 struct smbd_smb2_request *smb2req = smb1req->smb2req;
600 struct tevent_req *req = NULL;
601 struct smbd_smb2_lock_state *state = NULL;
602 struct blocking_lock_record *blr = NULL;
603 NTSTATUS status = NT_STATUS_OK;
608 req = smb2req->subreq;
612 if (!tevent_req_is_in_progress(smb2req->subreq)) {
615 state = tevent_req_data(req, struct smbd_smb2_lock_state);
/* Allocate the blocking record on the request state so it is freed with it. */
620 blr = talloc_zero(state, struct blocking_lock_record);
/* lock_timeout == -1 means wait forever (zero expire_time). */
626 if (lock_timeout == -1) {
627 blr->expire_time.tv_sec = 0;
628 blr->expire_time.tv_usec = 0; /* Never expire. */
/* Otherwise lock_timeout is in milliseconds; split into sec/usec. */
630 blr->expire_time = timeval_current_ofs(
632 (lock_timeout % 1000) * 1000);
635 blr->lock_num = lock_num;
636 blr->smblctx = smblctx;
637 blr->blocking_smblctx = blocking_smblctx;
638 blr->lock_flav = lock_flav;
639 blr->lock_type = lock_type;
640 blr->offset = offset;
643 /* Specific brl_lock() implementations can fill this in. */
644 blr->blr_private = NULL;
646 /* Add a pending lock record for this. */
647 status = brl_lock(sconn->msg_ctx,
650 sconn_server_id(sconn),
653 lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
659 if (!NT_STATUS_IS_OK(status)) {
660 DEBUG(0,("push_blocking_lock_request_smb2: "
661 "failed to add PENDING_LOCK record.\n"));
667 DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
/* Re-arm the timer so this new pending lock gets its expiry checked. */
671 recalc_smb2_brl_timeout(sconn);
673 /* Ensure we'll receive messages when this is unlocked. */
674 if (!sconn->smb2.locks.blocking_lock_unlock_state) {
675 messaging_register(sconn->msg_ctx, NULL,
676 MSG_SMB_UNLOCK, received_unlock_msg);
677 sconn->smb2.locks.blocking_lock_unlock_state = true;
680 /* allow this request to be canceled */
681 tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
686 /****************************************************************
687 Remove a pending lock record under lock.
688 *****************************************************************/
690 static void remove_pending_lock(struct smbd_smb2_lock_state *state,
691 struct blocking_lock_record *blr)
/* Take the byte-range lock db record so the cancel is done under lock. */
694 struct byte_range_lock *br_lck = brl_get_locks(
697 DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));
/* Cancel the PENDING_*_LOCK entry we queued earlier. */
700 brl_lock_cancel(br_lck,
702 sconn_server_id(blr->fsp->conn->sconn),
710 /* Remove the locks we already got. */
712 for(i = blr->lock_num - 1; i >= 0; i--) {
713 struct smbd_lock_element *e = &state->locks[i];
715 do_unlock(blr->fsp->conn->sconn->msg_ctx,
724 /****************************************************************
725 Re-process a blocking lock request.
726 This is equivalent to process_lockingX() inside smbd/blocking.c
727 *****************************************************************/
729 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
730 struct timeval tv_curr)
732 NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
733 struct blocking_lock_record *blr = NULL;
734 struct smbd_smb2_lock_state *state = NULL;
735 files_struct *fsp = NULL;
737 if (!smb2req->subreq) {
740 state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
748 /* Try and finish off getting all the outstanding locks. */
750 for (; blr->lock_num < state->lock_count; blr->lock_num++) {
751 struct byte_range_lock *br_lck = NULL;
752 struct smbd_lock_element *e = &state->locks[blr->lock_num];
754 br_lck = do_lock(fsp->conn->sconn->msg_ctx,
763 &blr->blocking_smblctx,
/* Stop at the first element that still cannot be granted. */
768 if (NT_STATUS_IS_ERR(status)) {
773 if(blr->lock_num == state->lock_count) {
775 * Success - we got all the locks.
778 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
779 "fnum=%d num_locks=%d\n",
782 (int)state->lock_count));
784 tevent_req_done(smb2req->subreq);
788 if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
789 !NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
791 * We have other than a "can't get lock"
792 * error. Return an error.
794 remove_pending_lock(state, blr);
795 tevent_req_nterror(smb2req->subreq, status);
800 * We couldn't get the locks for this record on the list.
801 * If the time has expired, return a lock error.
804 if (!timeval_is_zero(&blr->expire_time) &&
805 timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
806 remove_pending_lock(state, blr);
807 tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
812 * Still can't get all the locks - keep waiting.
815 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
816 "for file %s, fnum = %d. Still waiting....\n",
818 (int)state->lock_count,
826 /****************************************************************
827 Attempt to process all outstanding blocking locks pending on
829 *****************************************************************/
831 void process_blocking_lock_queue_smb2(
832 struct smbd_server_connection *sconn, struct timeval tv_curr)
834 struct smbd_smb2_request *smb2req, *nextreq;
/* Cache the next pointer: reprocessing may complete and free smb2req. */
836 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
837 const uint8_t *inhdr;
839 nextreq = smb2req->next;
841 if (smb2req->subreq == NULL) {
842 /* This message has been processed. */
845 if (!tevent_req_is_in_progress(smb2req->subreq)) {
846 /* This message has been processed. */
/* Only retry SMB2 LOCK requests. */
850 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
851 if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
852 reprocess_blocked_smb2_lock(smb2req, tv_curr);
/* Re-arm the expiry timer for whatever is still pending. */
856 recalc_smb2_brl_timeout(sconn);
859 /****************************************************************************
860 Remove any locks on this fd. Called from file_close().
861 ****************************************************************************/
863 void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
864 struct byte_range_lock *br_lck,
865 enum file_close_type close_type)
867 struct smbd_server_connection *sconn = fsp->conn->sconn;
868 struct smbd_smb2_request *smb2req, *nextreq;
870 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
871 struct smbd_smb2_lock_state *state = NULL;
872 files_struct *fsp_curr = NULL;
873 int i = smb2req->current_idx;
874 uint64_t in_file_id_volatile;
875 struct blocking_lock_record *blr = NULL;
876 const uint8_t *inhdr;
877 const uint8_t *inbody;
879 nextreq = smb2req->next;
881 if (smb2req->subreq == NULL) {
882 /* This message has been processed. */
885 if (!tevent_req_is_in_progress(smb2req->subreq)) {
886 /* This message has been processed. */
890 inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
891 if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
892 /* Not a lock call. */
896 inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
897 in_file_id_volatile = BVAL(inbody, 0x10);
899 state = tevent_req_data(smb2req->subreq,
900 struct smbd_smb2_lock_state);
902 /* Strange - is this even possible ? */
906 fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
907 if (fsp_curr == NULL) {
908 /* Strange - is this even possible ? */
912 if (fsp_curr != fsp) {
913 /* It's not our fid */
919 /* Remove the entries from the lock db. */
920 brl_lock_cancel(br_lck,
922 sconn_server_id(sconn),
928 /* Finally end the request. */
929 if (close_type == SHUTDOWN_CLOSE) {
930 tevent_req_done(smb2req->subreq);
932 tevent_req_nterror(smb2req->subreq,
933 NT_STATUS_RANGE_NOT_LOCKED);