enum brl_flavour {WINDOWS_LOCK = 0, POSIX_LOCK = 1};
#include "librpc/gen_ndr/server_id.h"
+#include "librpc/gen_ndr/misc.h"
/* This contains elements that differentiate locks. The smbpid is a
client supplied pid, and is essentially the locking context for
};
struct smbd_lock_element {
+ struct GUID req_guid;
uint64_t smblctx;
enum brl_type brltype;
uint64_t offset;
struct byte_range_lock {
struct files_struct *fsp;
+ TALLOC_CTX *req_mem_ctx;
+ const struct GUID *req_guid;
unsigned int num_locks;
bool modified;
struct lock_struct *lock_data;
return brl->fsp;
}
+/*
+ * Return the talloc context to use for request-scoped allocations on
+ * this byte-range-lock handle. If no per-request context was attached
+ * (see brl_get_locks_for_locking()), fall back to the brl object
+ * itself; talloc_get_type_abort() is used here to hand back the brl
+ * pointer as a non-const TALLOC_CTX with a runtime type check.
+ */
+TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
+{
+	if (brl->req_mem_ctx == NULL) {
+		return talloc_get_type_abort(brl, struct byte_range_lock);
+	}
+
+	return brl->req_mem_ctx;
+}
+
+/*
+ * Return the GUID identifying the request currently operating on this
+ * byte-range-lock handle. Handles without an attached request GUID
+ * (e.g. obtained via brl_get_locks() rather than
+ * brl_get_locks_for_locking()) get a pointer to an all-zero GUID, so
+ * callers never see NULL.
+ */
+const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
+{
+	if (brl->req_guid == NULL) {
+		/* Zero-initialized by static storage duration. */
+		static const struct GUID brl_zero_req_guid;
+		return &brl_zero_req_guid;
+	}
+
+	return brl->req_guid;
+}
+
/****************************************************************************
See if two locking contexts are equal.
****************************************************************************/
return br_lck;
}
+/*
+ * Fetch the byte-range-lock record for fsp (via brl_get_locks()) and
+ * attach the calling request's memory context and GUID to it, making
+ * them available through brl_req_mem_ctx()/brl_req_guid().
+ *
+ * Both req_mem_ctx and req_guid are mandatory (SMB_ASSERT) and must
+ * stay alive at least as long as the returned handle is used; only a
+ * pointer to req_guid is stored, no copy is taken.
+ *
+ * Returns NULL if brl_get_locks() fails.
+ */
+struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
+						  files_struct *fsp,
+						  TALLOC_CTX *req_mem_ctx,
+						  const struct GUID *req_guid)
+{
+	struct byte_range_lock *br_lck = NULL;
+
+	br_lck = brl_get_locks(mem_ctx, fsp);
+	if (br_lck == NULL) {
+		return NULL;
+	}
+	SMB_ASSERT(req_mem_ctx != NULL);
+	br_lck->req_mem_ctx = req_mem_ctx;
+	SMB_ASSERT(req_guid != NULL);
+	br_lck->req_guid = req_guid;
+
+	return br_lck;
+}
+
struct brl_get_locks_readonly_state {
TALLOC_CTX *mem_ctx;
struct byte_range_lock **br_lock;
/*
* No locks on this file. Return an empty br_lock.
*/
- br_lock = talloc(fsp, struct byte_range_lock);
+ br_lock = talloc_zero(fsp, struct byte_range_lock);
if (br_lock == NULL) {
return NULL;
}
- br_lock->num_locks = 0;
- br_lock->lock_data = NULL;
-
} else if (!NT_STATUS_IS_OK(status)) {
DEBUG(3, ("Could not parse byte range lock record: "
"%s\n", nt_errstr(status)));
struct do_lock_state {
struct files_struct *fsp;
+ TALLOC_CTX *req_mem_ctx;
+ const struct GUID *req_guid;
uint64_t smblctx;
uint64_t count;
uint64_t offset;
struct do_lock_state *state = private_data;
struct byte_range_lock *br_lck = NULL;
- br_lck = brl_get_locks(talloc_tos(), state->fsp);
+ br_lck = brl_get_locks_for_locking(talloc_tos(),
+ state->fsp,
+ state->req_mem_ctx,
+ state->req_guid);
if (br_lck == NULL) {
state->status = NT_STATUS_NO_MEMORY;
return;
}
NTSTATUS do_lock(files_struct *fsp,
+ TALLOC_CTX *req_mem_ctx,
+ const struct GUID *req_guid,
uint64_t smblctx,
uint64_t count,
uint64_t offset,
{
struct do_lock_state state = {
.fsp = fsp,
+ .req_mem_ctx = req_mem_ctx,
+ .req_guid = req_guid,
.smblctx = smblctx,
.count = count,
.offset = offset,
unsigned int brl_num_locks(const struct byte_range_lock *brl);
struct files_struct *brl_fsp(struct byte_range_lock *brl);
+TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl);
+const struct GUID *brl_req_guid(const struct byte_range_lock *brl);
bool byte_range_valid(uint64_t ofs, uint64_t len);
bool byte_range_overlap(uint64_t ofs1,
br_off start, br_off size,
void *private_data),
void *private_data);
+struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
+ files_struct *fsp,
+ TALLOC_CTX *req_mem_ctx,
+ const struct GUID *req_guid);
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
files_struct *fsp);
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp);
enum brl_type *plock_type,
enum brl_flavour lock_flav);
NTSTATUS do_lock(files_struct *fsp,
+ TALLOC_CTX *req_mem_ctx,
+ const struct GUID *req_guid,
uint64_t smblctx,
uint64_t count,
uint64_t offset,
bool netatalk_already_open_for_writing = false;
bool netatalk_already_open_with_deny_read = false;
bool netatalk_already_open_with_deny_write = false;
+ struct GUID req_guid = GUID_random();
/* FIXME: hardcoded data fork, add resource fork */
enum apple_fork fork_type = APPLE_FORK_DATA;
/* Set NetAtalk locks matching our access */
if (access_mask & FILE_READ_DATA) {
off = access_to_netatalk_brl(fork_type, FILE_READ_DATA);
+ req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
+ talloc_tos(),
+ &req_guid,
fsp->op->global->open_persistent_id,
1,
off,
if (!share_for_read) {
off = denymode_to_netatalk_brl(fork_type, DENY_READ);
+ req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
+ talloc_tos(),
+ &req_guid,
fsp->op->global->open_persistent_id,
1,
off,
if (access_mask & FILE_WRITE_DATA) {
off = access_to_netatalk_brl(fork_type, FILE_WRITE_DATA);
+ req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
+ talloc_tos(),
+ &req_guid,
fsp->op->global->open_persistent_id,
1,
off,
if (!share_for_write) {
off = denymode_to_netatalk_brl(fork_type, DENY_WRITE);
+ req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
+ talloc_tos(),
+ &req_guid,
fsp->op->global->open_persistent_id,
1,
off,
status = do_lock(
fsp,
+ locks, /* req_mem_ctx */
+ &e->req_guid,
e->smblctx,
e->count,
e->offset,
ZERO_STRUCT(sec_ctx_stack);
}
+
+/*
+ * Synthesize a GUID that identifies one lock element of one SMB
+ * request. This is NOT a standards-conformant UUID: the GUID fields
+ * are reused as a container for request-identifying values so that a
+ * lock can later be correlated back to the request that created it
+ * (presumably for cancellation/debugging — confirm against callers).
+ *
+ *   time_low             = low 32 bits of the message id (mid)
+ *   time_hi_and_version  = caller-chosen index (idx), e.g. the lock
+ *                          element number within the request
+ *   time_mid             = SMB2: current compound index;
+ *                          SMB1: low 16 bits of the vwv pointer
+ *   bytes 8..15          = the xconn pointer, stored little-endian
+ *                          via SBVAL over clock_seq/node
+ *
+ * Note the deliberate truncating casts; uniqueness is only as strong
+ * as the combination of these fields within one server process.
+ */
+struct GUID smbd_request_guid(struct smb_request *smb1req, uint16_t idx)
+{
+	struct GUID v = {
+		.time_low = (uint32_t)smb1req->mid,
+		.time_hi_and_version = idx,
+	};
+
+	if (smb1req->smb2req != NULL) {
+		v.time_mid = (uint16_t)smb1req->smb2req->current_idx;
+	} else {
+		v.time_mid = (uint16_t)(uintptr_t)smb1req->vwv;
+	}
+
+	SBVAL((uint8_t *)&v, 8, (uintptr_t)smb1req->xconn);
+
+	return v;
+}
void smbd_lock_socket(struct smbXsrv_connection *xconn);
void smbd_unlock_socket(struct smbXsrv_connection *xconn);
+struct GUID smbd_request_guid(struct smb_request *smb1req, uint16_t idx);
+
NTSTATUS smbd_do_unlocking(struct smb_request *req,
files_struct *fsp,
uint16_t num_ulocks,
*/
*lck = (struct smbd_lock_element) {
+ .req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = WRITE_LOCK,
.count = SVAL(req->vwv+1, 0),
if (numtowrite && !fsp->print_file) {
struct smbd_lock_element l = {
+ .req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = UNLOCK_LOCK,
.offset = startpos,
}
*lck = (struct smbd_lock_element) {
+ .req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = WRITE_LOCK,
.count = IVAL(req->vwv+1, 0),
}
lck = (struct smbd_lock_element) {
+ .req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = UNLOCK_LOCK,
.offset = IVAL(req->vwv+3, 0),
* smb_unlkrng structs
*/
for (i = 0; i < num_ulocks; i++) {
+ ulocks[i].req_guid = smbd_request_guid(req,
+ UINT16_MAX - i),
ulocks[i].smblctx = get_lock_pid(
data, i, large_file_format);
ulocks[i].count = get_lock_count(
}
for (i = 0; i < num_locks; i++) {
+ locks[i].req_guid = smbd_request_guid(req, i),
locks[i].smblctx = get_lock_pid(data, i, large_file_format);
locks[i].count = get_lock_count(data, i, large_file_format);
locks[i].offset = get_lock_offset(data, i, large_file_format);
return tevent_req_post(req, ev);
}
+ locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
locks[i].smblctx = fsp->op->global->open_persistent_id;
locks[i].offset = in_locks[i].offset;
locks[i].count = in_locks[i].length;
if (lock_type == UNLOCK_LOCK) {
struct smbd_lock_element l = {
+ .req_guid = smbd_request_guid(req, 0),
.smblctx = smblctx,
.brltype = UNLOCK_LOCK,
.offset = offset,
}
*lck = (struct smbd_lock_element) {
+ .req_guid = smbd_request_guid(req, 0),
.smblctx = smblctx,
.brltype = lock_type,
.count = count,