2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "smbd/globals.h"
23 #include "../libcli/smb/smb_common.h"
24 #include "librpc/gen_ndr/messaging.h"
/*
 * One parsed SMB2 LOCK_ELEMENT from the wire.
 * NOTE(review): the extract is truncated here — the member list and
 * closing brace are missing — but code below reads .offset, .length
 * and .flags (see smbd_smb2_request_process_lock()).
 */
26 struct smbd_smb2_lock_element {
32 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
33 struct tevent_context *ev,
34 struct smbd_smb2_request *smb2req,
36 uint64_t in_file_id_volatile,
37 uint16_t in_lock_count,
38 struct smbd_smb2_lock_element *in_locks);
39 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
41 static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
/*
 * Parse and validate an incoming SMB2 LOCK request, then kick off the
 * asynchronous lock processing via smbd_smb2_lock_send(); the reply is
 * sent from smbd_smb2_request_lock_done().
 *
 * NOTE(review): this extract is corrupted — original source line numbers
 * are fused onto each line and many lines (closing braces, declarations
 * such as inhdr/body_size/in_smbpid/l, some call arguments) are missing.
 * Code is preserved byte-for-byte; only comments were added.
 */
42 NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
45 const uint8_t *inbody;
46 const int i = req->current_idx;
/* The SMB2 LOCK request fixed body is 0x30 (48) bytes. */
47 size_t expected_body_size = 0x30;
50 uint16_t in_lock_count;
51 uint64_t in_file_id_persistent;
52 uint64_t in_file_id_volatile;
53 struct smbd_smb2_lock_element *in_locks;
54 struct tevent_req *subreq;
55 const uint8_t *lock_buffer;
58 inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
/*
 * The low bit is masked off because the wire body-size field counts one
 * byte of the dynamic part, so the fixed body may be one byte shorter.
 */
59 if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
60 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
63 inbody = (const uint8_t *)req->in.vector[i+1].iov_base;
65 body_size = SVAL(inbody, 0x00);
66 if (body_size != expected_body_size) {
67 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
70 in_smbpid = IVAL(inhdr, SMB2_HDR_PID);
/*
 * NOTE(review): LockCount is a 2-byte wire field, but only one byte is
 * read here (CVAL) — presumably intentional upstream; verify.
 */
72 in_lock_count = CVAL(inbody, 0x02);
73 /* 0x04 - 4 bytes reserved */
74 in_file_id_persistent = BVAL(inbody, 0x08);
75 in_file_id_volatile = BVAL(inbody, 0x10);
/* A LOCK request must carry at least one lock element. */
77 if (in_lock_count < 1) {
78 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/*
 * Element 0 lives in the fixed body; the remaining (in_lock_count - 1)
 * elements, 0x18 bytes each, must fit in the dynamic buffer.
 */
81 if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
82 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* File-id sanity check (compat chain vs. persistent handle id). */
85 if (req->compat_chain_fsp) {
87 } else if (in_file_id_persistent != 0) {
88 return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
91 in_locks = talloc_array(req, struct smbd_smb2_lock_element,
93 if (in_locks == NULL) {
94 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
/* Element 0 is parsed out of the fixed body at offset 0x18. */
98 lock_buffer = inbody + 0x18;
100 in_locks[l].offset = BVAL(lock_buffer, 0x00);
101 in_locks[l].length = BVAL(lock_buffer, 0x08);
102 in_locks[l].flags = IVAL(lock_buffer, 0x10);
103 /* 0x14 - 4 reserved bytes */
/* Remaining elements come from the dynamic buffer. */
105 lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;
107 for (l=1; l < in_lock_count; l++) {
108 in_locks[l].offset = BVAL(lock_buffer, 0x00);
109 in_locks[l].length = BVAL(lock_buffer, 0x08);
110 in_locks[l].flags = IVAL(lock_buffer, 0x10);
111 /* 0x14 - 4 reserved bytes */
/* Hand off to the async backend; completion via ..._lock_done(). */
116 subreq = smbd_smb2_lock_send(req,
117 req->sconn->smb2.event_ctx,
123 if (subreq == NULL) {
124 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
126 tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
/* Queue the request as pending until the lock operation completes. */
128 return smbd_smb2_request_pending_queue(req, subreq);
/*
 * Completion callback for the async SMB2 LOCK request: a cancelled
 * request answers NT_STATUS_CANCELLED, a failed lock answers the error,
 * and success marshals the fixed 4-byte LOCK response body.  If sending
 * any reply fails (a transport error), the connection is terminated.
 *
 * NOTE(review): extract corrupted — closing braces, some declarations
 * (status, outbody) and the terminate() argument lines are missing.
 * Code preserved byte-for-byte; only comments added.
 */
131 static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
133 struct smbd_smb2_request *req = tevent_req_callback_data(subreq,
134 struct smbd_smb2_request);
137 NTSTATUS error; /* transport error */
/* Cancelled: log the original message id and reply NT_STATUS_CANCELLED. */
139 if (req->cancelled) {
140 const uint8_t *inhdr = (const uint8_t *)
141 req->in.vector[req->current_idx].iov_base;
142 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
144 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
145 (unsigned long long)mid ));
146 error = smbd_smb2_request_error(req, NT_STATUS_CANCELLED);
147 if (!NT_STATUS_IS_OK(error)) {
148 smbd_server_connection_terminate(req->sconn,
/* Collect the result of the lock operation. */
155 status = smbd_smb2_lock_recv(subreq);
157 if (!NT_STATUS_IS_OK(status)) {
158 error = smbd_smb2_request_error(req, status);
159 if (!NT_STATUS_IS_OK(error)) {
160 smbd_server_connection_terminate(req->sconn,
/* Success: build the 4-byte SMB2 LOCK response body. */
167 outbody = data_blob_talloc(req->out.vector, NULL, 0x04);
168 if (outbody.data == NULL) {
169 error = smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
170 if (!NT_STATUS_IS_OK(error)) {
171 smbd_server_connection_terminate(req->sconn,
178 SSVAL(outbody.data, 0x00, 0x04); /* struct size */
179 SSVAL(outbody.data, 0x02, 0); /* reserved */
181 error = smbd_smb2_request_done(req, outbody, NULL);
182 if (!NT_STATUS_IS_OK(error)) {
183 smbd_server_connection_terminate(req->sconn,
/*
 * Per-request state for an asynchronous SMB2 lock operation.
 * NOTE(review): extract truncated — the closing brace and at least one
 * member (lock_count, referenced by code below) are missing from view.
 */
189 struct smbd_smb2_lock_state {
/* Owning SMB2 request; consulted by the cancel path. */
190 struct smbd_smb2_request *smb2req;
/* Fake SMB1 request used to drive the common locking backend. */
191 struct smb_request *smb1req;
/* Pending blocking-lock record, set when the request goes async. */
192 struct blocking_lock_record *blr;
/* Translated lock array handed to smbd_do_locking(). */
194 struct smbd_lock_element *locks;
/*
 * Async worker behind the SMB2 LOCK request: resolve the fsp from the
 * volatile file id, validate the lock-element flag combinations,
 * translate the SMB2 elements into smbd_lock_element records and run
 * them through the common smbd_do_locking() backend.
 *
 * NOTE(review): extract corrupted — several parameters (e.g. in_smbpid),
 * declarations (fsp, status, i, max_count), closing braces, default
 * switch labels and most smbd_do_locking() arguments are missing; two
 * fragments of a /&#42;…&#42;/ comment survive without their delimiters.
 * Code preserved byte-for-byte; only comments added.
 */
197 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
198 struct tevent_context *ev,
199 struct smbd_smb2_request *smb2req,
201 uint64_t in_file_id_volatile,
202 uint16_t in_lock_count,
203 struct smbd_smb2_lock_element *in_locks)
205 struct tevent_req *req;
206 struct smbd_smb2_lock_state *state;
207 struct smb_request *smb1req;
208 connection_struct *conn = smb2req->tcon->compat_conn;
/* -1: blocking lock with no timeout unless FAIL_IMMEDIATELY is set. */
210 int32_t timeout = -1;
211 bool isunlock = false;
213 struct smbd_lock_element *locks;
217 req = tevent_req_create(mem_ctx, &state,
218 struct smbd_smb2_lock_state);
222 state->smb2req = smb2req;
/* Build a fake SMB1 request so the SMB1 locking backend can be reused. */
223 smb1req = smbd_smb2_fake_smb_request(smb2req);
224 if (tevent_req_nomem(smb1req, req)) {
225 return tevent_req_post(req, ev);
227 state->smb1req = smb1req;
229 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
230 (unsigned long long)in_file_id_volatile));
/* Resolve the open file; reject ids for closed/foreign handles. */
232 fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
234 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
235 return tevent_req_post(req, ev);
/* The handle must belong to this tree connect... */
237 if (conn != fsp->conn) {
238 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
239 return tevent_req_post(req, ev);
/* ...and to this session. */
241 if (smb2req->session->vuid != fsp->vuid) {
242 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
243 return tevent_req_post(req, ev);
246 locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
248 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
249 return tevent_req_post(req, ev);
/*
 * The flags of element 0 decide the operation class: plain lock
 * (blocking), lock with FAIL_IMMEDIATELY, or unlock.
 */
252 switch (in_locks[0].flags) {
253 case SMB2_LOCK_FLAG_SHARED:
254 case SMB2_LOCK_FLAG_EXCLUSIVE:
/* A blocking (waiting) lock request may carry only one element. */
255 if (in_lock_count > 1) {
256 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
257 return tevent_req_post(req, ev);
262 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
263 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
267 case SMB2_LOCK_FLAG_UNLOCK:
268 /* only the first lock gives the UNLOCK bit - see
275 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
276 return tevent_req_post(req, ev);
/* Validate each element against the class chosen by element 0. */
279 for (i=0; i<in_lock_count; i++) {
281 bool invalid = false;
283 switch (in_locks[i].flags) {
284 case SMB2_LOCK_FLAG_SHARED:
285 case SMB2_LOCK_FLAG_EXCLUSIVE:
287 tevent_req_nterror(req,
288 NT_STATUS_INVALID_PARAMETER);
289 return tevent_req_post(req, ev);
292 tevent_req_nterror(req,
293 NT_STATUS_INVALID_PARAMETER);
294 return tevent_req_post(req, ev);
298 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
299 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
301 tevent_req_nterror(req,
302 NT_STATUS_INVALID_PARAMETER);
303 return tevent_req_post(req, ev);
307 case SMB2_LOCK_FLAG_UNLOCK:
309 tevent_req_nterror(req,
310 NT_STATUS_INVALID_PARAMETER);
311 return tevent_req_post(req, ev);
/* (fragment of an original block comment; delimiters lost) */
318 * is the first element was a UNLOCK
319 * we need to deferr the error response
320 * to the backend, because we need to process
321 * all unlock elements before
326 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
327 return tevent_req_post(req, ev);
/* Translate the SMB2 element into the common backend representation. */
330 locks[i].smbpid = in_smbpid;
331 locks[i].offset = in_locks[i].offset;
332 locks[i].count = in_locks[i].length;
334 if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
335 locks[i].brltype = WRITE_LOCK;
336 } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
337 locks[i].brltype = READ_LOCK;
338 } else if (invalid) {
/* (fragment of an original block comment; delimiters lost) */
340 * this is an invalid UNLOCK element
341 * and the backend needs to test for
342 * brltype != UNLOCK_LOCK and return
343 * NT_STATUS_INVALID_PARAMER
345 locks[i].brltype = READ_LOCK;
347 locks[i].brltype = UNLOCK_LOCK;
/* Reject ranges whose offset+count would wrap past UINT64_MAX. */
350 max_count = UINT64_MAX - locks[i].offset;
351 if (locks[i].count > max_count) {
352 tevent_req_nterror(req, NT_STATUS_INVALID_LOCK_RANGE);
353 return tevent_req_post(req, ev);
357 state->locks = locks;
358 state->lock_count = in_lock_count;
/*
 * NOTE(review): two smbd_do_locking() call sites survive with their
 * argument lists missing — presumably the unlock vs. lock branches of
 * an if (isunlock) — verify against the original source.
 */
361 status = smbd_do_locking(smb1req, fsp,
370 status = smbd_do_locking(smb1req, fsp,
379 if (!NT_STATUS_IS_OK(status)) {
/* Map the SMB1 backend's conflict status to the SMB2 equivalent. */
380 if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
381 status = NT_STATUS_LOCK_NOT_GRANTED;
383 tevent_req_nterror(req, status);
384 return tevent_req_post(req, ev);
391 tevent_req_done(req);
392 return tevent_req_post(req, ev);
395 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
399 if (tevent_req_is_nterror(req, &status)) {
400 tevent_req_received(req);
404 tevent_req_received(req);
408 /****************************************************************
409 Cancel an outstanding blocking lock request.
410 *****************************************************************/
412 static bool smbd_smb2_lock_cancel(struct tevent_req *req)
414 struct smbd_smb2_request *smb2req = NULL;
415 struct smbd_smb2_lock_state *state = tevent_req_data(req,
416 struct smbd_smb2_lock_state);
421 if (!state->smb2req) {
425 smb2req = state->smb2req;
426 smb2req->cancelled = true;
428 tevent_req_done(req);
432 /****************************************************************
433 Got a message saying someone unlocked a file. Re-schedule all
434 blocking lock requests as we don't know if anything overlapped.
435 *****************************************************************/
437 static void received_unlock_msg(struct messaging_context *msg,
440 struct server_id server_id,
443 DEBUG(10,("received_unlock_msg (SMB2)\n"));
444 process_blocking_lock_queue_smb2();
447 /****************************************************************
448 Function to get the blr on a pending record.
449 *****************************************************************/
451 struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
453 struct smbd_smb2_lock_state *state = NULL;
454 const uint8_t *inhdr;
459 if (smb2req->subreq == NULL) {
462 if (!tevent_req_is_in_progress(smb2req->subreq)) {
465 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
466 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
469 state = tevent_req_data(smb2req->subreq,
470 struct smbd_smb2_lock_state);
476 /****************************************************************
477 Set up the next brl timeout.
478 *****************************************************************/
/*
 * Recompute and (re-)arm the single timer used to re-check all pending
 * SMB2 blocking byte-range locks on this connection.  The next wakeup
 * is the minimum of all pending locks' expiry times, a 10s retry for
 * POSIX-blocked locks, and the brl:recalctime upper bound.
 *
 * NOTE(review): extract corrupted — closing braces, `continue`s, the
 * blr NULL check, the "return true" paths and most event_add_timed()
 * arguments are missing; two block comments lost their delimiters.
 * Code preserved byte-for-byte; only comments added.
 */
480 static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
482 struct smbd_smb2_request *smb2req;
483 struct timeval next_timeout;
/* Upper bound (seconds) between pending-lock re-checks; default 5s. */
484 int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);
/* Drop any previously armed timer before recomputing. */
486 TALLOC_FREE(sconn->smb2.locks.brl_timeout);
488 next_timeout = timeval_zero();
/* Scan all in-flight requests for pending blocking locks. */
490 for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
491 struct blocking_lock_record *blr =
492 get_pending_smb2req_blr(smb2req);
496 if (timeval_is_zero(&blr->expire_time)) {
/* (fragment of an original block comment; delimiters lost) */
498 * If we're blocked on pid 0xFFFFFFFF this is
499 * a POSIX lock, so calculate a timeout of
500 * 10 seconds into the future.
502 if (blr->blocking_pid == 0xFFFFFFFF) {
503 struct timeval psx_to = timeval_current_ofs(10, 0);
504 next_timeout = timeval_brl_min(&next_timeout, &psx_to);
510 next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
/* Nothing pending: no timer needed. */
513 if (timeval_is_zero(&next_timeout)) {
514 DEBUG(10, ("recalc_smb2_brl_timeout:Next "
515 "timeout = Infinite.\n"));
/* (fragment of an original block comment; delimiters lost) */
520 * To account for unclean shutdowns by clients we need a
521 * maximum timeout that we use for checking pending locks. If
522 * we have any pending locks at all, then check if the pending
523 * lock can continue at least every brl:recalctime seconds
524 * (default 5 seconds).
526 * This saves us needing to do a message_send_all() in the
527 * SIGCHLD handler in the parent daemon. That
528 * message_send_all() caused O(n^2) work to be done when IP
529 * failovers happened in clustered Samba, which could make the
530 * entire system unusable for many minutes.
533 if (max_brl_timeout > 0) {
534 struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
535 next_timeout = timeval_brl_min(&next_timeout, &min_to);
539 struct timeval cur, from_now;
541 cur = timeval_current();
542 from_now = timeval_until(&cur, &next_timeout);
543 DEBUG(10, ("recalc_smb2_brl_timeout: Next "
544 "timeout = %d.%d seconds from now.\n",
545 (int)from_now.tv_sec, (int)from_now.tv_usec));
/* Arm the timer; its handler re-runs the pending-lock queue. */
548 sconn->smb2.locks.brl_timeout = event_add_timed(
549 smbd_event_context(),
554 if (!sconn->smb2.locks.brl_timeout) {
560 /****************************************************************
561 Get an SMB2 lock reqeust to go async. lock_timeout should
563 *****************************************************************/
/*
 * Turn an SMB2 lock request that could not be satisfied immediately
 * into a pending blocking lock: allocate a blocking_lock_record, add a
 * PENDING_*_LOCK entry to the brl database, arm the retry timer,
 * register for MSG_SMB_UNLOCK notifications and make the request
 * cancellable.
 *
 * NOTE(review): extract corrupted — several parameters (lock_timeout,
 * offset/count, lock_num, lock_pid), guards, closing braces, most
 * brl_lock() arguments and the return statements are missing; the
 * talloc_zero of blr even appears twice (lines 596-597 vs 601),
 * presumably an artifact of the extraction.  Code preserved
 * byte-for-byte; only comments added.
 */
565 bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
566 struct smb_request *smb1req,
571 enum brl_type lock_type,
572 enum brl_flavour lock_flav,
575 uint32_t blocking_pid)
577 struct smbd_server_connection *sconn = smbd_server_conn;
578 struct smbd_smb2_request *smb2req = smb1req->smb2req;
579 struct tevent_req *req = NULL;
580 struct smbd_smb2_lock_state *state = NULL;
581 NTSTATUS status = NT_STATUS_OK;
586 req = smb2req->subreq;
590 state = tevent_req_data(req, struct smbd_smb2_lock_state);
596 struct blocking_lock_record *blr = talloc_zero(state,
597 struct blocking_lock_record);
601 blr = talloc_zero(state, struct blocking_lock_record);
/* -1 means wait forever; otherwise lock_timeout is in milliseconds. */
603 if (lock_timeout == -1) {
604 blr->expire_time.tv_sec = 0;
605 blr->expire_time.tv_usec = 0; /* Never expire. */
607 blr->expire_time = timeval_current_ofs(
609 (lock_timeout % 1000) * 1000);
/* Record which lock element we are waiting on and its parameters. */
612 blr->lock_num = lock_num;
613 blr->lock_pid = lock_pid;
614 blr->blocking_pid = blocking_pid;
615 blr->lock_flav = lock_flav;
616 blr->lock_type = lock_type;
617 blr->offset = offset;
620 /* Specific brl_lock() implementations can fill this in. */
621 blr->blr_private = NULL;
623 /* Add a pending lock record for this. */
624 status = brl_lock(smbd_messaging_context(),
630 lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
636 if (!NT_STATUS_IS_OK(status)) {
637 DEBUG(0,("push_blocking_lock_request_smb2: "
638 "failed to add PENDING_LOCK record.\n"));
/* Re-arm the retry timer now that a new pending lock exists. */
645 recalc_smb2_brl_timeout(sconn);
647 /* Ensure we'll receive messages when this is unlocked. */
648 if (!sconn->smb2.locks.blocking_lock_unlock_state) {
649 messaging_register(smbd_messaging_context(), NULL,
650 MSG_SMB_UNLOCK, received_unlock_msg);
651 sconn->smb2.locks.blocking_lock_unlock_state = true;
654 /* allow this request to be canceled */
655 tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
660 /****************************************************************
661 Re-proccess a blocking lock request.
662 This is equivalent to process_lockingX() inside smbd/blocking.c
663 *****************************************************************/
/*
 * Retry a blocked SMB2 lock request: attempt to acquire the remaining
 * lock elements starting at blr->lock_num.  On full success complete
 * the tevent request; on a hard error fail it; on a still-contended
 * lock leave the request pending and keep waiting.
 *
 * NOTE(review): extract corrupted — guards/returns, most do_lock()
 * arguments, the br_lck cleanup and closing braces are missing, and
 * several block comments lost their delimiters.  Code preserved
 * byte-for-byte; only comments added.
 */
665 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req)
668 struct blocking_lock_record *blr = NULL;
669 struct smbd_smb2_lock_state *state = NULL;
670 files_struct *fsp = NULL;
672 if (!smb2req->subreq) {
675 state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
683 /* Try and finish off getting all the outstanding locks. */
685 for (; blr->lock_num < state->lock_count; blr->lock_num++) {
686 struct byte_range_lock *br_lck = NULL;
687 struct smbd_lock_element *e = &state->locks[blr->lock_num];
689 br_lck = do_lock(smbd_messaging_context(),
/* Stop at the first element that still cannot be granted. */
703 if (NT_STATUS_IS_ERR(status)) {
708 if(blr->lock_num == state->lock_count) {
/* (fragment of an original block comment; delimiters lost) */
710 * Success - we got all the locks.
713 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
714 "fnum=%d num_locks=%d\n",
717 (int)state->lock_count));
719 tevent_req_done(smb2req->subreq);
/* Anything other than a contention status is a real failure. */
723 if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
724 !NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
/* (fragment of an original block comment; delimiters lost) */
726 * We have other than a "can't get lock"
727 * error. Return an error.
729 tevent_req_nterror(smb2req->subreq, status);
/* (fragment of an original block comment; delimiters lost) */
734 * Still can't get all the locks - keep waiting.
737 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
738 "for file %s, fnum = %d. Waiting....\n",
740 (int)state->lock_count,
748 /****************************************************************
749 Attempt to proccess all outstanding blocking locks pending on
751 *****************************************************************/
753 void process_blocking_lock_queue_smb2(void)
755 struct smbd_server_connection *sconn = smbd_server_conn;
756 struct smbd_smb2_request *smb2req, *nextreq;
758 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
759 const uint8_t *inhdr;
761 nextreq = smb2req->next;
763 if (smb2req->subreq == NULL) {
764 /* This message has been processed. */
767 if (!tevent_req_is_in_progress(smb2req->subreq)) {
768 /* This message has been processed. */
772 inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
773 if (IVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
774 reprocess_blocked_smb2_lock(smb2req);
778 recalc_smb2_brl_timeout(sconn);
781 /****************************************************************************
782 Remove any locks on this fd. Called from file_close().
783 ****************************************************************************/
785 void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
786 struct byte_range_lock *br_lck)
788 struct smbd_server_connection *sconn = smbd_server_conn;
789 struct smbd_smb2_request *smb2req, *nextreq;
791 for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
792 struct smbd_smb2_lock_state *state = NULL;
793 files_struct *fsp_curr = NULL;
794 int i = smb2req->current_idx;
795 uint64_t in_file_id_volatile;
796 struct blocking_lock_record *blr = NULL;
797 const uint8_t *inhdr;
798 const uint8_t *inbody;
800 nextreq = smb2req->next;
802 if (smb2req->subreq == NULL) {
803 /* This message has been processed. */
806 if (!tevent_req_is_in_progress(smb2req->subreq)) {
807 /* This message has been processed. */
811 inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
812 if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
813 /* Not a lock call. */
817 inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
818 in_file_id_volatile = BVAL(inbody, 0x10);
820 state = tevent_req_data(smb2req->subreq,
821 struct smbd_smb2_lock_state);
823 /* Strange - is this even possible ? */
827 fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
828 if (fsp_curr == NULL) {
829 /* Strange - is this even possible ? */
833 if (fsp_curr != fsp) {
834 /* It's not our fid */
840 /* Remove the entries from the lock db. */
841 brl_lock_cancel(br_lck,
849 /* Finally cancel the request. */
850 smb2req->cancelled = true;
851 tevent_req_cancel(smb2req->subreq);