2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "smbd/globals.h"
23 #include "../libcli/smb/smb_common.h"
/*
 * NOTE(review): this file is a damaged extraction of smbd's SMB2 LOCK
 * handling: the original file's line numbers are fused into the start of
 * every line and many lines (closing braces, some declarations) are
 * missing.  Comments below describe only what is visible.
 */
/* Parsed form of one SMB2_LOCK_ELEMENT from the request.  The member
 * list is truncated here; later code accesses .offset, .length and
 * .flags. */
25 struct smbd_smb2_lock_element {
/* Start async processing of the lock/unlock elements against the open
 * file identified by the volatile file id. */
31 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
32 struct tevent_context *ev,
33 struct smbd_smb2_request *smb2req,
35 uint64_t in_file_id_volatile,
36 uint16_t in_lock_count,
37 struct smbd_smb2_lock_element *in_locks);
/* Collect the result of smbd_smb2_lock_send() (status only, per the
 * signature). */
38 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
/* tevent callback fired when the lock subrequest completes. */
40 static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
/*
 * Entry point for an SMB2 LOCK request: validate the fixed request
 * body (StructureSize 0x30), parse the lock elements out of the input
 * vectors, and hand them to the async smbd_smb2_lock_send() worker.
 * NOTE(review): several lines (closing braces and declarations such as
 * inhdr/body_size/in_smbpid/l) are missing from this extraction.
 */
41 NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
44 const uint8_t *inbody;
45 const int i = req->current_idx;
46 size_t expected_body_size = 0x30;
49 uint16_t in_lock_count;
50 uint64_t in_file_id_persistent;
51 uint64_t in_file_id_volatile;
52 struct smbd_smb2_lock_element *in_locks;
53 struct tevent_req *subreq;
54 const uint8_t *lock_buffer;
/* The low bit of the body size covers the dynamic part, hence the
 * mask when comparing against the fixed-body iovec length. */
57 inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
58 if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
59 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
62 inbody = (const uint8_t *)req->in.vector[i+1].iov_base;
64 body_size = SVAL(inbody, 0x00);
65 if (body_size != expected_body_size) {
66 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* Parse the fixed fields of the LOCK request body. */
69 in_smbpid = IVAL(inhdr, SMB2_HDR_PID);
71 in_lock_count = CVAL(inbody, 0x02);
72 /* 0x04 - 4 bytes reserved */
73 in_file_id_persistent = BVAL(inbody, 0x08);
74 in_file_id_volatile = BVAL(inbody, 0x10);
/* At least one lock element is required... */
76 if (in_lock_count < 1) {
77 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* ...and all elements beyond the first (0x18 bytes each) must fit in
 * the dynamic input buffer. */
80 if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
81 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* NOTE(review): compat/persistent-id validation — the branch body is
 * truncated here; presumably a durable-handle check.  TODO confirm. */
84 if (req->compat_chain_fsp) {
86 } else if (in_file_id_persistent != 0) {
87 return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
90 in_locks = talloc_array(req, struct smbd_smb2_lock_element,
92 if (in_locks == NULL) {
93 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
/* The first element lives in the fixed body at offset 0x18... */
97 lock_buffer = inbody + 0x18;
99 in_locks[l].offset = BVAL(lock_buffer, 0x00);
100 in_locks[l].length = BVAL(lock_buffer, 0x08);
101 in_locks[l].flags = IVAL(lock_buffer, 0x10);
102 /* 0x14 - 4 reserved bytes */
/* ...the remaining elements come from the dynamic buffer. */
104 lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;
106 for (l=1; l < in_lock_count; l++) {
107 in_locks[l].offset = BVAL(lock_buffer, 0x00);
108 in_locks[l].length = BVAL(lock_buffer, 0x08);
109 in_locks[l].flags = IVAL(lock_buffer, 0x10);
110 /* 0x14 - 4 reserved bytes */
/* Kick off the async worker and mark the request as pending. */
115 subreq = smbd_smb2_lock_send(req,
116 req->sconn->smb2.event_ctx,
122 if (subreq == NULL) {
123 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
125 tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
127 return smbd_smb2_request_pending_queue(req, subreq);
/*
 * Completion callback for the LOCK subrequest: handle cancellation,
 * map worker errors into an error reply, and on success send the
 * fixed 4-byte LOCK response body.  Any failure to build/send a reply
 * is treated as fatal and terminates the connection.
 */
130 static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
132 struct smbd_smb2_request *req = tevent_req_callback_data(subreq,
133 struct smbd_smb2_request);
136 NTSTATUS error; /* transport error */
/* A cancelled request is answered with NT_STATUS_CANCELLED. */
138 if (req->cancelled) {
139 const uint8_t *inhdr = (const uint8_t *)
140 req->in.vector[req->current_idx].iov_base;
141 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
143 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
144 (unsigned long long)mid ));
145 error = smbd_smb2_request_error(req, NT_STATUS_CANCELLED);
146 if (!NT_STATUS_IS_OK(error)) {
147 smbd_server_connection_terminate(req->sconn,
/* Collect the worker's result; non-OK becomes an error response. */
154 status = smbd_smb2_lock_recv(subreq);
156 if (!NT_STATUS_IS_OK(status)) {
157 error = smbd_smb2_request_error(req, status);
158 if (!NT_STATUS_IS_OK(error)) {
159 smbd_server_connection_terminate(req->sconn,
/* Success: build the fixed 4-byte SMB2 LOCK response body. */
166 outbody = data_blob_talloc(req->out.vector, NULL, 0x04);
167 if (outbody.data == NULL) {
168 error = smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
169 if (!NT_STATUS_IS_OK(error)) {
170 smbd_server_connection_terminate(req->sconn,
177 SSVAL(outbody.data, 0x00, 0x04); /* struct size */
178 SSVAL(outbody.data, 0x02, 0); /* reserved */
180 error = smbd_smb2_request_done(req, outbody, NULL);
181 if (!NT_STATUS_IS_OK(error)) {
182 smbd_server_connection_terminate(req->sconn,
/*
 * Per-request state for an in-flight SMB2 lock operation, attached to
 * the tevent_req.  NOTE(review): trailing members (e.g. the lock_count
 * referenced elsewhere in this file) are missing from this extraction.
 */
188 struct smbd_smb2_lock_state {
189 struct smbd_smb2_request *smb2req;
190 struct smb_request *smb1req; /* fake SMB1 request for the shared locking code */
191 struct blocking_lock_record *blr; /* set while the request is blocked */
193 struct smbd_lock_element *locks; /* translated lock array for the backend */
/*
 * Async worker for SMB2 LOCK: resolve the fsp from the volatile file
 * id, validate the per-element flag combinations, translate the wire
 * elements into struct smbd_lock_element records, and call the shared
 * smbd_do_locking() backend.  All error paths post immediately.
 * NOTE(review): several declarations (fsp, i, status, max_count, ...)
 * and most closing braces are missing from this extraction.
 */
196 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
197 struct tevent_context *ev,
198 struct smbd_smb2_request *smb2req,
200 uint64_t in_file_id_volatile,
201 uint16_t in_lock_count,
202 struct smbd_smb2_lock_element *in_locks)
204 struct tevent_req *req;
205 struct smbd_smb2_lock_state *state;
206 struct smb_request *smb1req;
207 connection_struct *conn = smb2req->tcon->compat_conn;
209 int32_t timeout = -1; /* -1: may block indefinitely */
210 bool isunlock = false;
212 struct smbd_lock_element *locks;
216 req = tevent_req_create(mem_ctx, &state,
217 struct smbd_smb2_lock_state);
221 state->smb2req = smb2req;
/* Build a fake SMB1 request so the shared locking code can be reused. */
222 smb1req = smbd_smb2_fake_smb_request(smb2req);
223 if (tevent_req_nomem(smb1req, req)) {
224 return tevent_req_post(req, ev);
226 state->smb1req = smb1req;
228 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
229 (unsigned long long)in_file_id_volatile));
/* Resolve the volatile file id and verify it belongs to this tree
 * connect and session; any mismatch is reported as FILE_CLOSED. */
231 fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
233 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
234 return tevent_req_post(req, ev);
236 if (conn != fsp->conn) {
237 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
238 return tevent_req_post(req, ev);
240 if (smb2req->session->vuid != fsp->vuid) {
241 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
242 return tevent_req_post(req, ev);
245 locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
247 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
248 return tevent_req_post(req, ev);
/* The first element's flags decide the mode of the whole request
 * (lock vs. unlock, blocking vs. fail-immediately). */
251 switch (in_locks[0].flags) {
252 case SMB2_LOCK_FLAG_SHARED:
253 case SMB2_LOCK_FLAG_EXCLUSIVE:
254 if (in_lock_count > 1) {
255 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
256 return tevent_req_post(req, ev);
261 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
262 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
266 case SMB2_LOCK_FLAG_UNLOCK:
267 /* only the first lock gives the UNLOCK bit - see below */
/* Any other first-element flag combination is invalid. */
274 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
275 return tevent_req_post(req, ev);
/* Validate each element against the request mode and translate it
 * into the backend representation. */
278 for (i=0; i<in_lock_count; i++) {
280 bool invalid = false;
282 switch (in_locks[i].flags) {
283 case SMB2_LOCK_FLAG_SHARED:
284 case SMB2_LOCK_FLAG_EXCLUSIVE:
286 tevent_req_nterror(req,
287 NT_STATUS_INVALID_PARAMETER);
288 return tevent_req_post(req, ev);
291 tevent_req_nterror(req,
292 NT_STATUS_INVALID_PARAMETER);
293 return tevent_req_post(req, ev);
297 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
298 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
300 tevent_req_nterror(req,
301 NT_STATUS_INVALID_PARAMETER);
302 return tevent_req_post(req, ev);
306 case SMB2_LOCK_FLAG_UNLOCK:
308 tevent_req_nterror(req,
309 NT_STATUS_INVALID_PARAMETER);
310 return tevent_req_post(req, ev);
/*
 * if the first element was an UNLOCK we need to defer the
 * error response to the backend, because we need to
 * process all unlock elements before
 */
325 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
326 return tevent_req_post(req, ev);
329 locks[i].smbpid = in_smbpid;
330 locks[i].offset = in_locks[i].offset;
331 locks[i].count = in_locks[i].length;
/* Map SMB2 lock flags onto the byte-range-lock types. */
333 if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
334 locks[i].brltype = WRITE_LOCK;
335 } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
336 locks[i].brltype = READ_LOCK;
337 } else if (invalid) {
/*
 * this is an invalid UNLOCK element and the backend needs
 * to test for brltype != UNLOCK_LOCK and return
 * NT_STATUS_INVALID_PARAMETER
 */
344 locks[i].brltype = READ_LOCK;
346 locks[i].brltype = UNLOCK_LOCK;
/* Reject ranges that would wrap past UINT64_MAX. */
349 max_count = UINT64_MAX - locks[i].offset;
350 if (locks[i].count > max_count) {
351 tevent_req_nterror(req, NT_STATUS_INVALID_LOCK_RANGE);
352 return tevent_req_post(req, ev);
356 state->locks = locks;
357 state->lock_count = in_lock_count;
/* Hand off to the shared locking backend.  Two call sites are
 * visible — presumably the unlock vs. lock paths; the argument
 * lists are truncated here.  TODO confirm against upstream. */
360 status = smbd_do_locking(smb1req, fsp,
369 status = smbd_do_locking(smb1req, fsp,
378 if (!NT_STATUS_IS_OK(status)) {
379 if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
380 status = NT_STATUS_LOCK_NOT_GRANTED;
382 tevent_req_nterror(req, status);
383 return tevent_req_post(req, ev);
390 tevent_req_done(req);
391 return tevent_req_post(req, ev);
/*
 * Receive function for smbd_smb2_lock_send(): return the final
 * NTSTATUS and release the tevent_req.  SMB2 LOCK carries no result
 * payload beyond the status.
 */
394 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
398 if (tevent_req_is_nterror(req, &status)) {
399 tevent_req_received(req);
403 tevent_req_received(req);
407 /****************************************************************
408 Cancel an outstanding blocking lock request.
409 *****************************************************************/
411 static bool smbd_smb2_lock_cancel(struct tevent_req *req)
413 struct smbd_smb2_request *smb2req = NULL;
414 struct smbd_smb2_lock_state *state = tevent_req_data(req,
415 struct smbd_smb2_lock_state);
420 if (!state->smb2req) {
424 smb2req = state->smb2req;
/* Flag the SMB2 request as cancelled; the done-callback replies with
 * NT_STATUS_CANCELLED when it sees this flag. */
425 smb2req->cancelled = true;
427 tevent_req_done(req);
431 /****************************************************************
432 Got a message saying someone unlocked a file. Re-schedule all
433 blocking lock requests as we don't know if anything overlapped.
434 *****************************************************************/
/* MSG_SMB_UNLOCK handler; the message payload is ignored — the whole
 * pending queue is simply re-processed. */
436 static void received_unlock_msg(struct messaging_context *msg,
439 struct server_id server_id,
442 DEBUG(10,("received_unlock_msg (SMB2)\n"));
443 process_blocking_lock_queue_smb2();
446 /****************************************************************
447 Function to get the blr on a pending record.
448 *****************************************************************/
/* Return the blocking_lock_record of an in-progress SMB2 LOCK request,
 * or NULL for anything that is not a pending lock (the early-return
 * statements are truncated in this extraction). */
450 struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
452 struct smbd_smb2_lock_state *state = NULL;
453 const uint8_t *inhdr;
458 if (smb2req->subreq == NULL) {
461 if (!tevent_req_is_in_progress(smb2req->subreq)) {
/* Only LOCK opcodes can carry a blocking lock record. */
464 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
465 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
468 state = tevent_req_data(smb2req->subreq,
469 struct smbd_smb2_lock_state);
475 /****************************************************************
476 Set up the next brl timeout.
477 *****************************************************************/
/* Walk all outstanding SMB2 requests, find the earliest blocking-lock
 * expiry time and (re)arm sconn->smb2.locks.brl_timeout accordingly.
 * NOTE(review): return statements are truncated in this extraction. */
479 static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
481 struct smbd_smb2_request *smb2req;
482 struct timeval next_timeout;
483 int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);
485 TALLOC_FREE(sconn->smb2.locks.brl_timeout);
487 next_timeout = timeval_zero();
489 for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
490 struct blocking_lock_record *blr =
491 get_pending_smb2req_blr(smb2req);
495 if (timeval_is_zero(&blr->expire_time)) {
497 * If we're blocked on pid 0xFFFFFFFF this is
498 * a POSIX lock, so calculate a timeout of
499 * 10 seconds into the future.
501 if (blr->blocking_pid == 0xFFFFFFFF) {
502 struct timeval psx_to = timeval_current_ofs(10, 0);
503 next_timeout = timeval_brl_min(&next_timeout, &psx_to);
509 next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
/* A zero timeval here means no pending lock ever expires. */
512 if (timeval_is_zero(&next_timeout)) {
513 DEBUG(10, ("recalc_smb2_brl_timeout:Next "
514 "timeout = Infinite.\n"));
519 * To account for unclean shutdowns by clients we need a
520 * maximum timeout that we use for checking pending locks. If
521 * we have any pending locks at all, then check if the pending
522 * lock can continue at least every brl:recalctime seconds
523 * (default 5 seconds).
525 * This saves us needing to do a message_send_all() in the
526 * SIGCHLD handler in the parent daemon. That
527 * message_send_all() caused O(n^2) work to be done when IP
528 * failovers happened in clustered Samba, which could make the
529 * entire system unusable for many minutes.
532 if (max_brl_timeout > 0) {
533 struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
534 next_timeout = timeval_brl_min(&next_timeout, &min_to);
538 struct timeval cur, from_now;
540 cur = timeval_current();
541 from_now = timeval_until(&cur, &next_timeout);
542 DEBUG(10, ("recalc_smb2_brl_timeout: Next "
543 "timeout = %d.%d seconds from now.\n",
544 (int)from_now.tv_sec, (int)from_now.tv_usec));
/* Arm the timer; its handler is not visible in this extraction. */
547 sconn->smb2.locks.brl_timeout = event_add_timed(
548 smbd_event_context(),
553 if (!sconn->smb2.locks.brl_timeout) {
559 /****************************************************************
560 Get an SMB2 lock request to go async. lock_timeout should
562 *****************************************************************/
/* Create a blocking_lock_record for the current SMB2 LOCK subrequest,
 * add a PENDING_*_LOCK entry to the brl database, arm the brl timeout,
 * register for MSG_SMB_UNLOCK wakeups and make the request cancelable.
 * NOTE(review): several argument/assignment lines are truncated. */
564 bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
565 struct smb_request *smb1req,
570 enum brl_type lock_type,
571 enum brl_flavour lock_flav,
574 uint32_t blocking_pid)
576 struct smbd_server_connection *sconn = smbd_server_conn;
577 struct smbd_smb2_request *smb2req = smb1req->smb2req;
578 struct tevent_req *req = NULL;
579 struct smbd_smb2_lock_state *state = NULL;
580 NTSTATUS status = NT_STATUS_OK;
585 req = smb2req->subreq;
589 state = tevent_req_data(req, struct smbd_smb2_lock_state);
595 struct blocking_lock_record *blr = talloc_zero(state,
596 struct blocking_lock_record);
600 blr = talloc_zero(state, struct blocking_lock_record);
/* lock_timeout == -1 means "never expire"; otherwise it is a
 * millisecond timeout split into seconds/microseconds. */
602 if (lock_timeout == -1) {
603 blr->expire_time.tv_sec = 0;
604 blr->expire_time.tv_usec = 0; /* Never expire. */
606 blr->expire_time = timeval_current_ofs(
608 (lock_timeout % 1000) * 1000);
611 blr->lock_num = lock_num;
612 blr->lock_pid = lock_pid;
613 blr->blocking_pid = blocking_pid;
614 blr->lock_flav = lock_flav;
615 blr->lock_type = lock_type;
616 blr->offset = offset;
619 /* Specific brl_lock() implementations can fill this in. */
620 blr->blr_private = NULL;
622 /* Add a pending lock record for this. */
623 status = brl_lock(smbd_messaging_context(),
629 lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
635 if (!NT_STATUS_IS_OK(status)) {
636 DEBUG(0,("push_blocking_lock_request_smb2: "
637 "failed to add PENDING_LOCK record.\n"));
644 recalc_smb2_brl_timeout(sconn);
646 /* Ensure we'll receive messages when this is unlocked. */
647 if (!sconn->smb2.locks.blocking_lock_unlock_state) {
648 messaging_register(smbd_messaging_context(), NULL,
649 MSG_SMB_UNLOCK, received_unlock_msg);
650 sconn->smb2.locks.blocking_lock_unlock_state = true;
653 /* allow this request to be canceled */
654 tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
659 /****************************************************************
660 Re-process a blocking lock request.
661 This is equivalent to process_lockingX() inside smbd/blocking.c
662 *****************************************************************/
/* Retry the remaining locks of a blocked SMB2 LOCK request.  On full
 * success completes the subrequest; on a hard error fails it; on a
 * lock conflict leaves the request queued and keeps waiting.
 * NOTE(review): several lines (blr/fsp setup, do_lock() arguments,
 * closing braces) are truncated in this extraction. */
664 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req)
667 struct blocking_lock_record *blr = NULL;
668 struct smbd_smb2_lock_state *state = NULL;
669 files_struct *fsp = NULL;
671 if (!smb2req->subreq) {
674 state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
682 /* Try and finish off getting all the outstanding locks. */
684 for (; blr->lock_num < state->lock_count; blr->lock_num++) {
685 struct byte_range_lock *br_lck = NULL;
686 struct smbd_lock_element *e = &state->locks[blr->lock_num];
688 br_lck = do_lock(smbd_messaging_context(),
702 if (NT_STATUS_IS_ERR(status)) {
707 if(blr->lock_num == state->lock_count) {
709 * Success - we got all the locks.
712 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
713 "fnum=%d num_locks=%d\n",
716 (int)state->lock_count));
718 tevent_req_done(smb2req->subreq);
722 if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
723 !NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
725 * We have other than a "can't get lock"
726 * error. Return an error.
728 tevent_req_nterror(smb2req->subreq, status);
733 * Still can't get all the locks - keep waiting.
736 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
737 "for file %s, fnum = %d. Waiting....\n",
739 (int)state->lock_count,
747 /****************************************************************
748 Attempt to process all outstanding blocking locks pending on
750 *****************************************************************/
/* Walk the SMB2 request queue, retry every in-progress LOCK request,
 * then recalculate the brl timeout. */
752 void process_blocking_lock_queue_smb2(void)
754 struct smbd_server_connection *sconn = smbd_server_conn;
755 struct smbd_smb2_request *smb2req, *nextreq;
/* Save next pointer up front: reprocessing may complete (and thus
 * unlink) the current request. */
757 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
758 const uint8_t *inhdr;
760 nextreq = smb2req->next;
762 if (smb2req->subreq == NULL) {
763 /* This message has been processed. */
766 if (!tevent_req_is_in_progress(smb2req->subreq)) {
767 /* This message has been processed. */
771 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
772 if (IVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
773 reprocess_blocked_smb2_lock(smb2req);
777 recalc_smb2_brl_timeout(sconn);
780 /****************************************************************************
781 Remove any locks on this fd. Called from file_close().
782 ****************************************************************************/
/* Find every pending SMB2 LOCK request on the given fsp, remove its
 * entries from the brl database and cancel the subrequest.
 * NOTE(review): the tail of this function (brl_lock_cancel arguments
 * and closing braces) runs past the end of this extraction. */
784 void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
785 struct byte_range_lock *br_lck)
787 struct smbd_server_connection *sconn = smbd_server_conn;
788 struct smbd_smb2_request *smb2req, *nextreq;
790 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
791 struct smbd_smb2_lock_state *state = NULL;
792 files_struct *fsp_curr = NULL;
793 int i = smb2req->current_idx;
794 uint64_t in_file_id_volatile;
795 struct blocking_lock_record *blr = NULL;
796 const uint8_t *inhdr;
797 const uint8_t *inbody;
799 nextreq = smb2req->next;
801 if (smb2req->subreq == NULL) {
802 /* This message has been processed. */
805 if (!tevent_req_is_in_progress(smb2req->subreq)) {
806 /* This message has been processed. */
810 inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
811 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
812 /* Not a lock call. */
/* Re-parse the volatile file id from the original request body. */
816 inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
817 in_file_id_volatile = BVAL(inbody, 0x10);
819 state = tevent_req_data(smb2req->subreq,
820 struct smbd_smb2_lock_state);
822 /* Strange - is this even possible ? */
826 fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
827 if (fsp_curr == NULL) {
828 /* Strange - is this even possible ? */
832 if (fsp_curr != fsp) {
833 /* It's not our fid */
839 /* Remove the entries from the lock db. */
840 brl_lock_cancel(br_lck,
848 /* Finally cancel the request. */
849 smb2req->cancelled = true;
850 tevent_req_cancel(smb2req->subreq);