2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "smbd/globals.h"
23 #include "../libcli/smb/smb_common.h"
25 struct smbd_smb2_lock_element {
/*
 * Forward declarations for the async SMB2 LOCK implementation below:
 * smbd_smb2_lock_send() starts the operation, smbd_smb2_lock_recv()
 * collects its NTSTATUS result, and smbd_smb2_request_lock_done() is
 * the tevent callback that builds the wire response.
 */
31 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
32 struct tevent_context *ev,
33 struct smbd_smb2_request *smb2req,
35 uint64_t in_file_id_volatile,
36 uint16_t in_lock_count,
37 struct smbd_smb2_lock_element *in_locks);
38 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
40 static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
/*
 * Parse an incoming SMB2 LOCK request: validate the fixed-size request
 * body, pull the array of lock elements out of the fixed body and the
 * dynamic buffer, and dispatch the actual locking work to the async
 * smbd_smb2_lock_send() helper.  The wire response is generated later
 * in smbd_smb2_request_lock_done().
 */
41 NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
44 const uint8_t *inbody;
45 const int i = req->current_idx;
46 size_t expected_body_size = 0x30;
49 uint16_t in_lock_count;
50 uint64_t in_file_id_persistent;
51 uint64_t in_file_id_volatile;
52 struct smbd_smb2_lock_element *in_locks;
53 struct tevent_req *subreq;
54 const uint8_t *lock_buffer;
57 inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
/* The low bit of the declared body size flags a dynamic buffer, so
 * compare the received length against the size with that bit masked. */
58 if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
59 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
62 inbody = (const uint8_t *)req->in.vector[i+1].iov_base;
64 body_size = SVAL(inbody, 0x00);
65 if (body_size != expected_body_size) {
66 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
69 in_smbpid = IVAL(inhdr, SMB2_HDR_PID);
/* NOTE(review): CVAL() reads a single byte here, but the SMB2
 * LockCount field is 16 bits wide - verify this should not be SVAL(). */
71 in_lock_count = CVAL(inbody, 0x02);
72 /* 0x04 - 4 bytes reserved */
73 in_file_id_persistent = BVAL(inbody, 0x08);
74 in_file_id_volatile = BVAL(inbody, 0x10);
/* At least one lock element is required. */
76 if (in_lock_count < 1) {
77 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* Element 0 lives in the fixed body; each further element is 0x18
 * bytes and must fit inside the dynamic buffer. */
80 if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
81 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
84 if (req->compat_chain_fsp) {
86 } else if (in_file_id_persistent != 0) {
87 return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
90 in_locks = talloc_array(req, struct smbd_smb2_lock_element,
92 if (in_locks == NULL) {
93 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
/* The first lock element is embedded in the fixed body at 0x18... */
97 lock_buffer = inbody + 0x18;
99 in_locks[l].offset = BVAL(lock_buffer, 0x00);
100 in_locks[l].length = BVAL(lock_buffer, 0x08);
101 in_locks[l].flags = IVAL(lock_buffer, 0x10);
102 /* 0x14 - 4 reserved bytes */
/* ...the remaining elements follow in the dynamic buffer. */
104 lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;
106 for (l=1; l < in_lock_count; l++) {
107 in_locks[l].offset = BVAL(lock_buffer, 0x00);
108 in_locks[l].length = BVAL(lock_buffer, 0x08);
109 in_locks[l].flags = IVAL(lock_buffer, 0x10);
110 /* 0x14 - 4 reserved bytes */
115 subreq = smbd_smb2_lock_send(req,
116 req->sconn->smb2.event_ctx,
122 if (subreq == NULL) {
123 return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
125 tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
/* Go async; the response is sent from the callback. */
127 return smbd_smb2_request_pending_queue(req, subreq);
/*
 * Completion callback for smbd_smb2_lock_send(): collect the NTSTATUS
 * result and send either an error response or the 4-byte SMB2 LOCK
 * success body.  Failing to send any response at all is fatal for the
 * connection.
 */
130 static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
132 struct smbd_smb2_request *req = tevent_req_callback_data(subreq,
133 struct smbd_smb2_request);
136 NTSTATUS error; /* transport error */
138 status = smbd_smb2_lock_recv(subreq);
140 if (!NT_STATUS_IS_OK(status)) {
141 error = smbd_smb2_request_error(req, status);
142 if (!NT_STATUS_IS_OK(error)) {
/* We could not even queue the error reply - drop the connection. */
143 smbd_server_connection_terminate(req->sconn,
150 outbody = data_blob_talloc(req->out.vector, NULL, 0x04);
151 if (outbody.data == NULL) {
152 error = smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
153 if (!NT_STATUS_IS_OK(error)) {
154 smbd_server_connection_terminate(req->sconn,
/* SMB2 LOCK response body: StructureSize (4) plus 2 reserved bytes. */
161 SSVAL(outbody.data, 0x00, 0x04); /* struct size */
162 SSVAL(outbody.data, 0x02, 0); /* reserved */
164 error = smbd_smb2_request_done(req, outbody, NULL);
165 if (!NT_STATUS_IS_OK(error)) {
166 smbd_server_connection_terminate(req->sconn,
/* Per-request state carried by the tevent_req of the async SMB2
 * lock operation. */
172 struct smbd_smb2_lock_state {
173 struct smbd_smb2_request *smb2req;
/*
 * Start the async SMB2 LOCK/UNLOCK operation: resolve the volatile
 * file id to an fsp, verify it belongs to this tcon and session,
 * validate the per-element flag combinations and translate the SMB2
 * lock elements into smbd_lock_element structures before calling the
 * smbd_do_locking() backend.
 *
 * Returns a tevent_req; fetch the result with smbd_smb2_lock_recv().
 */
176 static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
177 struct tevent_context *ev,
178 struct smbd_smb2_request *smb2req,
180 uint64_t in_file_id_volatile,
181 uint16_t in_lock_count,
182 struct smbd_smb2_lock_element *in_locks)
184 struct tevent_req *req;
185 struct smbd_smb2_lock_state *state;
186 struct smb_request *smbreq;
187 connection_struct *conn = smb2req->tcon->compat_conn;
/* -1: blocking lock with no timeout (overridden per flag set below). */
189 int32_t timeout = -1;
190 bool isunlock = false;
192 struct smbd_lock_element *locks;
196 req = tevent_req_create(mem_ctx, &state,
197 struct smbd_smb2_lock_state);
201 state->smb2req = smb2req;
203 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
204 (unsigned long long)in_file_id_volatile));
206 smbreq = smbd_smb2_fake_smb_request(smb2req);
207 if (tevent_req_nomem(smbreq, req)) {
208 return tevent_req_post(req, ev);
/* Map the volatile file id to an fsp and make sure it belongs to
 * this tree connect and session. */
211 fsp = file_fsp(smbreq, (uint16_t)in_file_id_volatile);
213 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
214 return tevent_req_post(req, ev);
216 if (conn != fsp->conn) {
217 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
218 return tevent_req_post(req, ev);
220 if (smb2req->session->vuid != fsp->vuid) {
221 tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
222 return tevent_req_post(req, ev);
225 locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
227 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
228 return tevent_req_post(req, ev);
/* The flags of the first element select the overall mode
 * (blocking lock, fail-immediately lock, or unlock). */
231 switch (in_locks[0].flags) {
232 case SMB2_LOCK_FLAG_SHARED:
233 case SMB2_LOCK_FLAG_EXCLUSIVE:
234 if (in_lock_count > 1) {
235 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
236 return tevent_req_post(req, ev);
241 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
242 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
246 case SMB2_LOCK_FLAG_UNLOCK:
247 /* only the first lock gives the UNLOCK bit - see
254 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
255 return tevent_req_post(req, ev);
/* Validate every element's flags against the mode chosen above. */
258 for (i=0; i<in_lock_count; i++) {
260 bool invalid = false;
262 switch (in_locks[i].flags) {
263 case SMB2_LOCK_FLAG_SHARED:
264 case SMB2_LOCK_FLAG_EXCLUSIVE:
266 tevent_req_nterror(req,
267 NT_STATUS_INVALID_PARAMETER);
268 return tevent_req_post(req, ev);
271 tevent_req_nterror(req,
272 NT_STATUS_INVALID_PARAMETER);
273 return tevent_req_post(req, ev);
277 case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
278 case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
280 tevent_req_nterror(req,
281 NT_STATUS_INVALID_PARAMETER);
282 return tevent_req_post(req, ev);
286 case SMB2_LOCK_FLAG_UNLOCK:
288 tevent_req_nterror(req,
289 NT_STATUS_INVALID_PARAMETER);
290 return tevent_req_post(req, ev);
297 * if the first element was an UNLOCK
298 * we need to defer the error response
299 * to the backend, because we need to process
300 * all unlock elements before
305 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
306 return tevent_req_post(req, ev);
309 locks[i].smbpid = in_smbpid;
310 locks[i].offset = in_locks[i].offset;
311 locks[i].count = in_locks[i].length;
/* Translate SMB2 lock flags into the byte-range-lock type. */
313 if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
314 locks[i].brltype = WRITE_LOCK;
315 } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
316 locks[i].brltype = READ_LOCK;
317 } else if (invalid) {
319 * this is an invalid UNLOCK element
320 * and the backend needs to test for
321 * brltype != UNLOCK_LOCK and return
322 * NT_STATUS_INVALID_PARAMETER
324 locks[i].brltype = READ_LOCK;
326 locks[i].brltype = UNLOCK_LOCK;
/* Reject ranges whose offset + count would wrap past UINT64_MAX. */
329 max_count = UINT64_MAX - locks[i].offset;
330 if (locks[i].count > max_count) {
331 tevent_req_nterror(req, NT_STATUS_INVALID_LOCK_RANGE);
332 return tevent_req_post(req, ev);
337 status = smbd_do_locking(smbreq, fsp,
346 status = smbd_do_locking(smbreq, fsp,
355 if (!NT_STATUS_IS_OK(status)) {
/* Map a byte-range lock conflict to the status SMB2 clients expect. */
356 if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
357 status = NT_STATUS_LOCK_NOT_GRANTED;
359 tevent_req_nterror(req, status);
360 return tevent_req_post(req, ev);
364 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
365 return tevent_req_post(req, ev);
368 tevent_req_done(req);
369 return tevent_req_post(req, ev);
/*
 * Collect the NTSTATUS result of smbd_smb2_lock_send() and release
 * the tevent_req via tevent_req_received().
 */
372 static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
376 if (tevent_req_is_nterror(req, &status)) {
377 tevent_req_received(req);
381 tevent_req_received(req);
386 * Dummy (for now) function to cope with SMB2 blocking lock
390 bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
391 struct smb_request *req,
396 enum brl_type lock_type,
397 enum brl_flavour lock_flav,
400 uint32_t blocking_pid)