#include "librpc/ndr/libndr.h"
#include "libcli/smb/smb2_negotiate_context.h"
#include "lib/crypto/sha512.h"
+#include "lib/crypto/aes.h"
+#include "lib/crypto/aes_ccm_128.h"
+#include "lib/crypto/aes_gcm_128.h"
struct smbXcli_conn;
struct smbXcli_req;
struct smbXcli_tcon;
struct smbXcli_conn {
- int read_fd;
- int write_fd;
+ int sock_fd;
struct sockaddr_storage local_ss;
struct sockaddr_storage remote_ss;
const char *remote_name;
struct tevent_queue *outgoing;
struct tevent_req **pending;
struct tevent_req *read_smb_req;
+ struct tevent_req *suicide_req;
enum protocol_types min_protocol;
enum protocol_types max_protocol;
uint16_t cur_credits;
uint16_t max_credits;
+ uint32_t cc_chunk_len;
+ uint32_t cc_max_chunks;
+
uint8_t io_priority;
+ bool force_channel_sequence;
+
uint8_t preauth_sha512[64];
} smb2;
bool should_encrypt;
DATA_BLOB encryption_key;
DATA_BLOB decryption_key;
+ uint64_t nonce_high_random;
+ uint64_t nonce_high_max;
uint64_t nonce_high;
uint64_t nonce_low;
uint16_t channel_sequence;
bool replay_active;
+ bool require_signed_response;
};
struct smbXcli_session {
struct {
uint16_t session_id;
+ uint16_t action;
DATA_BLOB application_key;
bool protected_key;
} smb1;
uint8_t *inbuf;
+ struct tevent_req *write_req;
+
+ struct timeval endtime;
+
struct {
/* Space for the header including the wct */
uint8_t hdr[HDR_VWV];
uint64_t encryption_session_id;
bool signing_skipped;
+ bool require_signed_response;
bool notify_async;
bool got_async;
uint16_t cancel_flags;
return NULL;
}
- conn->read_fd = fd;
- conn->write_fd = dup(fd);
- if (conn->write_fd == -1) {
- goto error;
- }
+ conn->sock_fd = fd;
conn->remote_name = talloc_strdup(conn, remote_name);
if (conn->remote_name == NULL) {
goto error;
}
-
ss = (void *)&conn->local_ss;
sa = (struct sockaddr *)ss;
sa_length = sizeof(conn->local_ss);
conn->desire_signing = false;
conn->mandatory_signing = false;
break;
+ case SMB_SIGNING_DESIRED:
+ /* if the server desires it */
+ conn->allow_signing = true;
+ conn->desire_signing = true;
+ conn->mandatory_signing = false;
+ break;
+ case SMB_SIGNING_IPC_DEFAULT:
case SMB_SIGNING_REQUIRED:
/* always */
conn->allow_signing = true;
conn->smb2.max_credits = 0;
conn->smb2.io_priority = 1;
+ /*
+ * Samba and Windows servers accept a maximum of 16 MiB with a maximum
+ * chunk length of 1 MiB.
+ */
+ conn->smb2.cc_chunk_len = 1024 * 1024;
+ conn->smb2.cc_max_chunks = 16;
+
talloc_set_destructor(conn, smbXcli_conn_destructor);
return conn;
error:
- if (conn->write_fd != -1) {
- close(conn->write_fd);
- }
TALLOC_FREE(conn);
return NULL;
}
return false;
}
- if (conn->read_fd == -1) {
+ if (conn->sock_fd == -1) {
return false;
}
return false;
}
+/*
+ * Report whether SMB signing was configured as mandatory for this
+ * connection (set from the SMB_SIGNING_* level at connection setup).
+ */
+bool smbXcli_conn_signing_mandatory(struct smbXcli_conn *conn)
+{
+ return conn->mandatory_signing;
+}
+
+/*
+ * [MS-SMB] 2.2.2.3.5 - SMB1 support for passing through
+ * query/set commands to the file system
+ *
+ * Passthrough is always available on SMB2 and later dialects; on
+ * SMB1 it requires the server to have advertised CAP_W2K_SMBS.
+ */
+bool smbXcli_conn_support_passthrough(struct smbXcli_conn *conn)
+{
+ if (conn->protocol >= PROTOCOL_SMB2_02) {
+ return true;
+ }
+
+ if (conn->smb1.capabilities & CAP_W2K_SMBS) {
+ return true;
+ }
+
+ return false;
+}
+
void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
{
- set_socket_options(conn->read_fd, options);
+ set_socket_options(conn->sock_fd, options);
}
const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
return &conn->smb1.server.guid;
}
+/* Getter for the smb2.force_channel_sequence flag. */
+bool smbXcli_conn_get_force_channel_sequence(struct smbXcli_conn *conn)
+{
+ return conn->smb2.force_channel_sequence;
+}
+
+/*
+ * Force outgoing SMB2 requests to carry the channel sequence number
+ * (smb2cli_req_create() initializes use_channel_sequence from this flag).
+ */
+void smbXcli_conn_set_force_channel_sequence(struct smbXcli_conn *conn,
+ bool v)
+{
+ conn->smb2.force_channel_sequence = v;
+}
+
struct smbXcli_conn_samba_suicide_state {
struct smbXcli_conn *conn;
struct iovec iov;
uint8_t buf[9];
+ struct tevent_req *write_req;
};
+static void smbXcli_conn_samba_suicide_cleanup(struct tevent_req *req,
+ enum tevent_req_state req_state);
static void smbXcli_conn_samba_suicide_done(struct tevent_req *subreq);
struct tevent_req *smbXcli_conn_samba_suicide_send(TALLOC_CTX *mem_ctx,
SCVAL(state->buf, 8, exitcode);
_smb_setlen_nbt(state->buf, sizeof(state->buf)-4);
+ if (conn->suicide_req != NULL) {
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+
state->iov.iov_base = state->buf;
state->iov.iov_len = sizeof(state->buf);
- subreq = writev_send(state, ev, conn->outgoing, conn->write_fd,
+ subreq = writev_send(state, ev, conn->outgoing, conn->sock_fd,
false, &state->iov, 1);
if (tevent_req_nomem(subreq, req)) {
return tevent_req_post(req, ev);
}
tevent_req_set_callback(subreq, smbXcli_conn_samba_suicide_done, req);
+ state->write_req = subreq;
+
+ tevent_req_set_cleanup_fn(req, smbXcli_conn_samba_suicide_cleanup);
+
+ /*
+ * We need to use tevent_req_defer_callback()
+ * in order to allow smbXcli_conn_disconnect()
+ * to do a safe cleanup.
+ */
+ tevent_req_defer_callback(req, ev);
+ conn->suicide_req = req;
+
return req;
}
+/*
+ * Cleanup function for the samba-suicide request: free any in-flight
+ * writev subrequest and detach the request from conn->suicide_req, so
+ * that smbXcli_conn_disconnect() no longer references a dead request.
+ */
+static void smbXcli_conn_samba_suicide_cleanup(struct tevent_req *req,
+ enum tevent_req_state req_state)
+{
+ struct smbXcli_conn_samba_suicide_state *state = tevent_req_data(
+ req, struct smbXcli_conn_samba_suicide_state);
+
+ TALLOC_FREE(state->write_req);
+
+ if (state->conn == NULL) {
+ /* already detached from the connection */
+ return;
+ }
+
+ if (state->conn->suicide_req == req) {
+ state->conn->suicide_req = NULL;
+ }
+ state->conn = NULL;
+}
+
static void smbXcli_conn_samba_suicide_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
ssize_t nwritten;
int err;
+ state->write_req = NULL;
+
nwritten = writev_recv(subreq, &err);
TALLOC_FREE(subreq);
if (nwritten == -1) {
+ /* here, we need to notify all pending requests */
NTSTATUS status = map_nt_error_from_unix_common(err);
smbXcli_conn_disconnect(state->conn, status);
return;
bool smb1cli_conn_req_possible(struct smbXcli_conn *conn)
{
- size_t pending;
+ size_t pending = talloc_array_length(conn->pending);
uint16_t possible = conn->smb1.server.max_mux;
- pending = tevent_queue_length(conn->outgoing);
- if (pending >= possible) {
- return false;
- }
- pending += talloc_array_length(conn->pending);
if (pending >= possible) {
return false;
}
}
}
+/*
+ * Try to cancel a request's pending writev before the request is
+ * removed from conn->pending.
+ *
+ * Returns NT_STATUS_OK when the connection remains usable afterwards.
+ * Any other status (e.g. a broken SMB1 signing sequence, or a writev
+ * failure) means the caller has to disconnect the connection.
+ */
+static NTSTATUS smbXcli_req_cancel_write_req(struct tevent_req *req)
+{
+ struct smbXcli_req_state *state =
+ tevent_req_data(req,
+ struct smbXcli_req_state);
+ struct smbXcli_conn *conn = state->conn;
+ size_t num_pending = talloc_array_length(conn->pending);
+ ssize_t ret;
+ int err;
+ bool ok;
+
+ if (state->write_req == NULL) {
+ /* nothing in flight, nothing to cancel */
+ return NT_STATUS_OK;
+ }
+
+ /*
+ * Check if it's possible to cancel the request.
+ * If the result is true it's not too late.
+ * See writev_cancel().
+ */
+ ok = tevent_req_cancel(state->write_req);
+ if (ok) {
+ TALLOC_FREE(state->write_req);
+
+ if (conn->protocol >= PROTOCOL_SMB2_02) {
+ /*
+ * SMB2 has a sane signing state.
+ */
+ return NT_STATUS_OK;
+ }
+
+ if (num_pending > 1) {
+ /*
+ * We have more pending requests following us. This
+ * means the signing state will be broken for them.
+ *
+ * As a solution we could add the requests directly to
+ * our outgoing queue and do the signing in the trigger
+ * function and then use writev_send() without passing a
+ * queue. That way we'll only sign packets we're most
+ * likely send to the wire.
+ */
+ return NT_STATUS_REQUEST_OUT_OF_SEQUENCE;
+ }
+
+ /*
+ * If we're the only request that's
+ * pending, we're able to recover the signing
+ * state.
+ */
+ smb_signing_cancel_reply(conn->smb1.signing,
+ state->smb1.one_way_seqnum);
+ return NT_STATUS_OK;
+ }
+
+ /* too late to cancel: wait for the writev result instead */
+ ret = writev_recv(state->write_req, &err);
+ TALLOC_FREE(state->write_req);
+ if (ret == -1) {
+ return map_nt_error_from_unix_common(err);
+ }
+
+ return NT_STATUS_OK;
+}
+
void smbXcli_req_unset_pending(struct tevent_req *req)
{
struct smbXcli_req_state *state =
struct smbXcli_conn *conn = state->conn;
size_t num_pending = talloc_array_length(conn->pending);
size_t i;
+ NTSTATUS cancel_status;
+
+ cancel_status = smbXcli_req_cancel_write_req(req);
if (state->smb1.mid != 0) {
/*
* This is a [nt]trans[2] request which waits
* for more than one reply.
*/
+ if (!NT_STATUS_IS_OK(cancel_status)) {
+ /*
+ * If the write_req cancel didn't work
+ * we can't use the connection anymore.
+ */
+ smbXcli_conn_disconnect(conn, cancel_status);
+ return;
+ }
return;
}
* conn->pending. So if nothing is pending anymore, we need to
* delete the socket read fde.
*/
+ /* TODO: smbXcli_conn_cancel_read_req */
TALLOC_FREE(conn->pending);
conn->read_smb_req = NULL;
+
+ if (!NT_STATUS_IS_OK(cancel_status)) {
+ /*
+ * If the write_req cancel didn't work
+ * we can't use the connection anymore.
+ */
+ smbXcli_conn_disconnect(conn, cancel_status);
+ return;
+ }
return;
}
* right thing nevertheless, the point of this routine is to
* remove ourselves from conn->pending.
*/
+
+ if (!NT_STATUS_IS_OK(cancel_status)) {
+ /*
+ * If the write_req cancel didn't work
+ * we can't use the connection anymore.
+ */
+ smbXcli_conn_disconnect(conn, cancel_status);
+ return;
+ }
return;
}
*/
conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
num_pending - 1);
+
+ if (!NT_STATUS_IS_OK(cancel_status)) {
+ /*
+ * If the write_req cancel didn't work
+ * we can't use the connection anymore.
+ */
+ smbXcli_conn_disconnect(conn, cancel_status);
+ return;
+ }
return;
}
struct smbXcli_req_state *state =
tevent_req_data(req,
struct smbXcli_req_state);
+ struct smbXcli_conn *conn = state->conn;
+ NTSTATUS cancel_status;
switch (req_state) {
case TEVENT_REQ_RECEIVED:
/*
* Make sure we really remove it from
* the pending array on destruction.
+ *
+ * smbXcli_req_unset_pending() calls
+ * smbXcli_req_cancel_write_req() internal
*/
state->smb1.mid = 0;
smbXcli_req_unset_pending(req);
return;
default:
+ cancel_status = smbXcli_req_cancel_write_req(req);
+ if (!NT_STATUS_IS_OK(cancel_status)) {
+ /*
+ * If the write_req cancel didn't work
+ * we can't use the connection anymore.
+ */
+ smbXcli_conn_disconnect(conn, cancel_status);
+ return;
+ }
return;
}
}
*/
conn->read_smb_req = read_smb_send(conn->pending,
state->ev,
- conn->read_fd);
+ conn->sock_fd);
if (conn->read_smb_req == NULL) {
return false;
}
void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
{
struct smbXcli_session *session;
+ int sock_fd = conn->sock_fd;
tevent_queue_stop(conn->outgoing);
- if (conn->read_fd != -1) {
- close(conn->read_fd);
- }
- if (conn->write_fd != -1) {
- close(conn->write_fd);
- }
- conn->read_fd = -1;
- conn->write_fd = -1;
+ conn->sock_fd = -1;
session = conn->sessions;
if (talloc_array_length(conn->pending) == 0) {
smb2cli_session_increment_channel_sequence(session);
}
+ if (conn->suicide_req != NULL) {
+ /*
+ * smbXcli_conn_samba_suicide_send()
+ * used tevent_req_defer_callback() already.
+ */
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(conn->suicide_req, status);
+ }
+ conn->suicide_req = NULL;
+ }
+
/*
* Cancel all pending requests. We do not do a for-loop walking
* conn->pending because that array changes in
state = tevent_req_data(req, struct smbXcli_req_state);
if (state->smb1.chained_requests == NULL) {
+ bool in_progress;
+
/*
* We're dead. No point waiting for trans2
* replies.
continue;
}
+ in_progress = tevent_req_is_in_progress(req);
+ if (!in_progress) {
+ /*
+ * already finished
+ */
+ continue;
+ }
+
/*
* we need to defer the callback, because we may notify
* more then one caller.
num_chained = talloc_array_length(chain);
for (i=0; i<num_chained; i++) {
+ bool in_progress;
+
req = chain[i];
state = tevent_req_data(req, struct smbXcli_req_state);
continue;
}
+ in_progress = tevent_req_is_in_progress(req);
+ if (!in_progress) {
+ /*
+ * already finished
+ */
+ continue;
+ }
+
/*
* we need to defer the callback, because we may notify
* more than one caller.
}
TALLOC_FREE(chain);
}
+
+ if (sock_fd != -1) {
+ close(sock_fd);
+ }
}
/*
return ret;
}
-static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
- const struct iovec *iov,
- int count)
-{
- ssize_t buflen;
- uint8_t *buf;
-
- buflen = iov_buflen(iov, count);
- if (buflen == -1) {
- return NULL;
- }
-
- buf = talloc_array(mem_ctx, uint8_t, buflen);
- if (buf == NULL) {
- return NULL;
- }
-
- iov_buf(iov, count, buf, buflen);
-
- return buf;
-}
-
static void smb1cli_req_flags(enum protocol_types protocol,
uint32_t smb1_capabilities,
uint8_t smb_command,
state->smb1.iov_count = iov_count + 4;
if (timeout_msec > 0) {
- struct timeval endtime;
-
- endtime = timeval_current_ofs_msec(timeout_msec);
- if (!tevent_req_set_endtime(req, ev, endtime)) {
+ state->endtime = timeval_current_ofs_msec(timeout_msec);
+ if (!tevent_req_set_endtime(req, ev, state->endtime)) {
return req;
}
}
frame = talloc_stackframe();
- buf = smbXcli_iov_concat(frame, &iov[1], iov_count - 1);
+ buf = iov_concat(frame, &iov[1], iov_count - 1);
if (buf == NULL) {
return NT_STATUS_NO_MEMORY;
}
}
if (state->conn->protocol > PROTOCOL_NT1) {
+ DBG_ERR("called for dialect[%s] server[%s]\n",
+ smb_protocol_types_string(state->conn->protocol),
+ smbXcli_conn_remote_name(state->conn));
return NT_STATUS_REVISION_MISMATCH;
}
if (common_encryption_on(state->conn->smb1.trans_enc)) {
char *buf, *enc_buf;
- buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
+ buf = (char *)iov_concat(talloc_tos(), iov, iov_count);
if (buf == NULL) {
return NT_STATUS_NO_MEMORY;
}
state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
}
+ if (!smbXcli_req_set_pending(req)) {
+ return NT_STATUS_NO_MEMORY;
+ }
+
tevent_req_set_cancel_fn(req, smbXcli_req_cancel);
subreq = writev_send(state, state->ev, state->conn->outgoing,
- state->conn->write_fd, false, iov, iov_count);
+ state->conn->sock_fd, false, iov, iov_count);
if (subreq == NULL) {
return NT_STATUS_NO_MEMORY;
}
tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
+ state->write_req = subreq;
+
return NT_STATUS_OK;
}
ssize_t nwritten;
int err;
+ state->write_req = NULL;
+
nwritten = writev_recv(subreq, &err);
TALLOC_FREE(subreq);
if (nwritten == -1) {
+ /* here, we need to notify all pending requests */
NTSTATUS status = map_nt_error_from_unix_common(err);
smbXcli_conn_disconnect(state->conn, status);
- tevent_req_nterror(req, status);
return;
}
tevent_req_done(req);
return;
}
-
- if (!smbXcli_req_set_pending(req)) {
- tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
- return;
- }
}
static void smbXcli_conn_received(struct tevent_req *subreq)
struct iovec **piov, int *pnum_iov)
{
struct iovec *iov;
- int num_iov;
+ size_t num_iov;
size_t buflen;
size_t taken;
size_t remaining;
wct_ofs = SVAL(cur[0].iov_base, 2);
if (wct_ofs < taken) {
- return NT_STATUS_INVALID_NETWORK_RESPONSE;
+ goto inval;
}
if (wct_ofs > buflen) {
- return NT_STATUS_INVALID_NETWORK_RESPONSE;
+ goto inval;
}
/*
conn->smb2.max_credits = max_credits;
}
+/* Current number of SMB2 credits granted by the server. */
+uint16_t smb2cli_conn_get_cur_credits(struct smbXcli_conn *conn)
+{
+ return conn->smb2.cur_credits;
+}
+
uint8_t smb2cli_conn_get_io_priority(struct smbXcli_conn *conn)
{
if (conn->protocol < PROTOCOL_SMB3_11) {
conn->smb2.io_priority = io_priority;
}
+/*
+ * Accessors for the server-side copy (copy-chunk) limits.
+ * Defaults are set at connection creation (1 MiB chunks, 16 chunks),
+ * matching what Samba and Windows servers accept.
+ */
+
+/* Maximum length of a single copy-chunk. */
+uint32_t smb2cli_conn_cc_chunk_len(struct smbXcli_conn *conn)
+{
+ return conn->smb2.cc_chunk_len;
+}
+
+void smb2cli_conn_set_cc_chunk_len(struct smbXcli_conn *conn,
+ uint32_t chunk_len)
+{
+ conn->smb2.cc_chunk_len = chunk_len;
+}
+
+/* Maximum number of chunks per copy-chunk request. */
+uint32_t smb2cli_conn_cc_max_chunks(struct smbXcli_conn *conn)
+{
+ return conn->smb2.cc_max_chunks;
+}
+
+void smb2cli_conn_set_cc_max_chunks(struct smbXcli_conn *conn,
+ uint32_t max_chunks)
+{
+ conn->smb2.cc_max_chunks = max_chunks;
+}
+
static void smb2cli_req_cancel_done(struct tevent_req *subreq);
static bool smb2cli_req_cancel(struct tevent_req *req)
TALLOC_FREE(subreq);
}
+/*
+ * Return the absolute deadline of a request, as recorded when the
+ * request's timeout was set (zero timeval if no timeout was set).
+ */
+struct timeval smbXcli_req_endtime(struct tevent_req *req)
+{
+ struct smbXcli_req_state *state = tevent_req_data(
+ req, struct smbXcli_req_state);
+
+ return state->endtime;
+}
+
struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
struct smbXcli_conn *conn,
uint32_t flags = 0;
uint32_t tid = 0;
uint64_t uid = 0;
- bool use_channel_sequence = false;
+ bool use_channel_sequence = conn->smb2.force_channel_sequence;
uint16_t channel_sequence = 0;
bool use_replay_flag = false;
state->smb2.should_sign = session->smb2->should_sign;
state->smb2.should_encrypt = session->smb2->should_encrypt;
+ state->smb2.require_signed_response =
+ session->smb2->require_signed_response;
if (cmd == SMB2_OP_SESSSETUP &&
session->smb2_channel.signing_key.length == 0 &&
}
if (timeout_msec > 0) {
- struct timeval endtime;
-
- endtime = timeval_current_ofs_msec(timeout_msec);
- if (!tevent_req_set_endtime(req, ev, endtime)) {
+ state->endtime = timeval_current_ofs_msec(timeout_msec);
+ if (!tevent_req_set_endtime(req, ev, state->endtime)) {
return req;
}
}
int tf_iov = -1;
const DATA_BLOB *encryption_key = NULL;
uint64_t encryption_session_id = 0;
+ uint64_t nonce_high = UINT64_MAX;
+ uint64_t nonce_low = UINT64_MAX;
/*
* 1 for the nbt length, optional TRANSFORM
encryption_session_id = state->session->smb2->session_id;
+ state->session->smb2->nonce_low += 1;
+ if (state->session->smb2->nonce_low == 0) {
+ state->session->smb2->nonce_high += 1;
+ state->session->smb2->nonce_low += 1;
+ }
+
+ /*
+ * CCM and GCM algorithms must never have their
+ * nonce wrap, or the security of the whole
+ * communication and the keys is destroyed.
+ * We must drop the connection once we have
+ * transferred too much data.
+ *
+ * NOTE: We assume nonces greater than 8 bytes.
+ */
+ if (state->session->smb2->nonce_high >=
+ state->session->smb2->nonce_high_max)
+ {
+ return NT_STATUS_ENCRYPTION_FAILED;
+ }
+
+ nonce_high = state->session->smb2->nonce_high_random;
+ nonce_high += state->session->smb2->nonce_high;
+ nonce_low = state->session->smb2->nonce_low;
+
tf_iov = num_iov;
iov[num_iov].iov_base = state->smb2.transform;
iov[num_iov].iov_len = sizeof(state->smb2.transform);
SBVAL(state->smb2.transform, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
SBVAL(state->smb2.transform, SMB2_TF_NONCE,
- state->session->smb2->nonce_low);
+ nonce_low);
SBVAL(state->smb2.transform, SMB2_TF_NONCE+8,
- state->session->smb2->nonce_high);
+ nonce_high);
SBVAL(state->smb2.transform, SMB2_TF_SESSION_ID,
encryption_session_id);
- state->session->smb2->nonce_low += 1;
- if (state->session->smb2->nonce_low == 0) {
- state->session->smb2->nonce_high += 1;
- state->session->smb2->nonce_low += 1;
- }
-
nbt_len += SMB2_TF_HDR_SIZE;
break;
}
}
subreq = writev_send(state, state->ev, state->conn->outgoing,
- state->conn->write_fd, false, iov, num_iov);
+ state->conn->sock_fd, false, iov, num_iov);
if (subreq == NULL) {
return NT_STATUS_NO_MEMORY;
}
tevent_req_set_callback(subreq, smb2cli_req_writev_done, reqs[0]);
+ state->write_req = subreq;
+
return NT_STATUS_OK;
}
ssize_t nwritten;
int err;
+ state->write_req = NULL;
+
nwritten = writev_recv(subreq, &err);
TALLOC_FREE(subreq);
if (nwritten == -1) {
}
}
+/*
+ * Find the session with the given SMB2 session id on this connection,
+ * or NULL if no such session exists.
+ */
+static struct smbXcli_session* smbXcli_session_by_uid(struct smbXcli_conn *conn,
+ uint64_t uid)
+{
+ struct smbXcli_session *s = conn->sessions;
+
+ for (; s; s = s->next) {
+ if (s->smb2->session_id != uid) {
+ continue;
+ }
+ break;
+ }
+
+ return s;
+}
+
static NTSTATUS smb2cli_inbuf_parse_compound(struct smbXcli_conn *conn,
uint8_t *buf,
size_t buflen,
TALLOC_CTX *mem_ctx,
- struct iovec **piov, int *pnum_iov)
+ struct iovec **piov,
+ size_t *pnum_iov)
{
struct iovec *iov;
int num_iov = 0;
goto inval;
}
- s = conn->sessions;
- for (; s; s = s->next) {
- if (s->smb2->session_id != uid) {
- continue;
- }
- break;
- }
-
+ s = smbXcli_session_by_uid(conn, uid);
if (s == NULL) {
DEBUG(10, ("unknown session_id %llu\n",
(unsigned long long)uid));
{
struct tevent_req *req;
struct smbXcli_req_state *state = NULL;
- struct iovec *iov;
- int i, num_iov;
+ struct iovec *iov = NULL;
+ size_t i, num_iov = 0;
NTSTATUS status;
bool defer = true;
struct smbXcli_session *last_session = NULL;
}
last_session = session;
- if (state->smb2.should_sign) {
- if (!(flags & SMB2_HDR_FLAG_SIGNED)) {
- return NT_STATUS_ACCESS_DENIED;
- }
- }
-
if (flags & SMB2_HDR_FLAG_SIGNED) {
uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID);
if (session == NULL) {
- struct smbXcli_session *s;
-
- s = state->conn->sessions;
- for (; s; s = s->next) {
- if (s->smb2->session_id != uid) {
- continue;
- }
-
- session = s;
- break;
- }
+ session = smbXcli_session_by_uid(state->conn,
+ uid);
}
if (session == NULL) {
*/
signing_key = NULL;
}
+
+ if (!NT_STATUS_IS_OK(status)) {
+ /*
+ * Only check the signature of the last response
+ * of a successfull session auth. This matches
+ * Windows behaviour for NTLM auth and reauth.
+ */
+ state->smb2.require_signed_response = false;
+ }
+ }
+
+ if (state->smb2.should_sign ||
+ state->smb2.require_signed_response)
+ {
+ if (!(flags & SMB2_HDR_FLAG_SIGNED)) {
+ return NT_STATUS_ACCESS_DENIED;
+ }
+ }
+
+ if (signing_key == NULL && state->smb2.require_signed_response) {
+ signing_key = &session->smb2_channel.signing_key;
}
if (cur[0].iov_len == SMB2_TF_HDR_SIZE) {
}
}
if (signing_key) {
- int cmp;
- static const uint8_t zeros[16];
-
- cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
- zeros,
- 16);
- if (cmp == 0) {
+ bool zero;
+ zero = all_zero(inhdr+SMB2_HDR_SIGNATURE, 16);
+ if (zero) {
state->smb2.signing_skipped = true;
signing_key = NULL;
}
}
if (signing_key) {
- status = smb2_signing_check_pdu(*signing_key,
- state->conn->protocol,
- &cur[1], 3);
- if (!NT_STATUS_IS_OK(status)) {
+ NTSTATUS signing_status;
+
+ signing_status = smb2_signing_check_pdu(*signing_key,
+ state->conn->protocol,
+ &cur[1], 3);
+ if (!NT_STATUS_IS_OK(signing_status)) {
/*
* If the signing check fails, we disconnect
* the connection.
*/
- return status;
+ return signing_status;
}
}
struct smbXcli_conn *conn,
uint32_t timeout_msec,
enum protocol_types min_protocol,
- enum protocol_types max_protocol)
+ enum protocol_types max_protocol,
+ uint16_t max_credits)
{
struct tevent_req *req, *subreq;
struct smbXcli_negprot_state *state;
conn->max_protocol = max_protocol;
conn->protocol = PROTOCOL_NONE;
+ if (max_protocol >= PROTOCOL_SMB2_02) {
+ conn->smb2.max_credits = max_credits;
+ }
+
if ((min_protocol < PROTOCOL_SMB2_02) &&
(max_protocol < PROTOCOL_SMB2_02)) {
/*
*/
conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
- /*
- * As we're starting with an SMB2 negprot, emulate Windows
- * and ask for 31 credits in the initial SMB2 negprot.
- * If we don't and leave requested credits at
- * zero, MacOSX servers return zero credits on
- * the negprot reply and we fail to connect.
- */
- smb2cli_conn_set_max_credits(conn,
- WINDOWS_CLIENT_PURE_SMB2_NEGPROT_INITIAL_CREDIT_ASK);
-
subreq = smbXcli_negprot_smb2_subreq(state);
if (tevent_req_nomem(subreq, req)) {
return tevent_req_post(req, ev);
}
if (conn->protocol == PROTOCOL_NONE) {
+ DBG_ERR("No compatible protocol selected by server.\n");
tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
return;
}
}
SSVAL(p, 0, 2); /* ChiperCount */
- SSVAL(p, 2, SMB2_ENCRYPTION_AES128_GCM);
- SSVAL(p, 4, SMB2_ENCRYPTION_AES128_CCM);
+ /*
+ * For now we prefer CCM because our implementation
+ * is faster than GCM, see bug #11451.
+ */
+ SSVAL(p, 2, SMB2_ENCRYPTION_AES128_CCM);
+ SSVAL(p, 4, SMB2_ENCRYPTION_AES128_GCM);
b = data_blob_const(p, 6);
status = smb2_negotiate_context_add(state, &c,
return;
}
+ /*
+ * Here we are now at SMB3_11, so encryption should be
+ * negotiated via context, not capabilities.
+ */
+
if (conn->smb2.server.capabilities & SMB2_CAP_ENCRYPTION) {
- tevent_req_nterror(req,
- NT_STATUS_INVALID_NETWORK_RESPONSE);
- return;
+ /*
+ * Server set SMB2_CAP_ENCRYPTION capability,
+ * but *SHOULD* not, not *MUST* not. Just mask it off.
+ * NetApp seems to do this:
+ * BUG: https://bugzilla.samba.org/show_bug.cgi?id=13009
+ */
+ conn->smb2.server.capabilities &= ~SMB2_CAP_ENCRYPTION;
}
negotiate_context_offset = IVAL(body, 60);
goto fail;
}
req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
- min_protocol, max_protocol);
+ min_protocol, max_protocol,
+ WINDOWS_CLIENT_PURE_SMB2_NEGPROT_INITIAL_CREDIT_ASK);
if (req == NULL) {
goto fail;
}
tevent_req_done(req);
return;
}
+ if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
+ /*
+ * The response was signed, but not supported
+ *
+ * This might be returned by older Windows versions or by
+ * NetApp SMB server implementations.
+ *
+ * See
+ *
+ * https://blogs.msdn.microsoft.com/openspecification/2012/06/28/smb3-secure-dialect-negotiation/
+ *
+ */
+ tevent_req_done(req);
+ return;
+ }
if (tevent_req_nterror(req, status)) {
return;
}
}
talloc_set_destructor(session, smbXcli_session_destructor);
- DLIST_ADD_END(conn->sessions, session, struct smbXcli_session *);
+ DLIST_ADD_END(conn->sessions, session);
session->conn = conn;
memcpy(session->smb2_channel.preauth_sha512,
session->smb2_channel = src->smb2_channel;
session->disconnect_expired = src->disconnect_expired;
- DLIST_ADD_END(src->conn->sessions, session, struct smbXcli_session *);
+ DLIST_ADD_END(src->conn->sessions, session);
talloc_set_destructor(session, smbXcli_session_destructor);
return session;
}
+/*
+ * Report whether the session was authenticated as guest.
+ *
+ * With mandatory signing configured this always returns false.
+ * For SMB2+ the SMB2_SESSION_FLAG_IS_GUEST session flag is checked,
+ * for SMB1 the SMB_SETUP_GUEST bit of the session setup action word
+ * (see smb1cli_session_set_action()).
+ */
+bool smbXcli_session_is_guest(struct smbXcli_session *session)
+{
+ if (session == NULL) {
+ return false;
+ }
+
+ if (session->conn == NULL) {
+ return false;
+ }
+
+ if (session->conn->mandatory_signing) {
+ return false;
+ }
+
+ if (session->conn->protocol >= PROTOCOL_SMB2_02) {
+ if (session->smb2->session_flags & SMB2_SESSION_FLAG_IS_GUEST) {
+ return true;
+ }
+ return false;
+ }
+
+ if (session->smb1.action & SMB_SETUP_GUEST) {
+ return true;
+ }
+
+ return false;
+}
+
bool smbXcli_session_is_authenticated(struct smbXcli_session *session)
{
const DATA_BLOB *application_key;
+ if (session == NULL) {
+ return false;
+ }
+
if (session->conn == NULL) {
return false;
}
session->smb1.session_id = session_id;
}
+/*
+ * Store the action word from the SMB1 session setup response
+ * (e.g. SMB_SETUP_GUEST); read back by smbXcli_session_is_guest().
+ */
+void smb1cli_session_set_action(struct smbXcli_session *session,
+ uint16_t action)
+{
+ session->smb1.action = action;
+}
+
NTSTATUS smb1cli_session_set_session_key(struct smbXcli_session *session,
const DATA_BLOB _session_key)
{
if (conn->mandatory_signing) {
security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
}
+ if (session->smb2->should_sign) {
+ security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
+ }
return security_mode;
}
return prev_cs;
}
+/* Current SMB2 channel sequence number of the session. */
+uint16_t smb2cli_session_current_channel_sequence(struct smbXcli_session *session)
+{
+ return session->smb2->channel_sequence;
+}
+
void smb2cli_session_start_replay(struct smbXcli_session *session)
{
session->smb2->replay_active = true;
session->smb2->replay_active = false;
}
+/*
+ * Require signed responses on this session: requests created while
+ * this flag is set reject unsigned responses with
+ * NT_STATUS_ACCESS_DENIED (see the SMB2_HDR_FLAG_SIGNED check in the
+ * response path).
+ */
+void smb2cli_session_require_signed_response(struct smbXcli_session *session,
+ bool require_signed_response)
+{
+ session->smb2->require_signed_response = require_signed_response;
+}
+
NTSTATUS smb2cli_session_update_preauth(struct smbXcli_session *session,
const struct iovec *iov)
{
const struct iovec *recv_iov)
{
struct smbXcli_conn *conn = session->conn;
- uint16_t no_sign_flags;
+ uint16_t no_sign_flags = 0;
uint8_t session_key[16];
bool check_signature = true;
uint32_t hdr_flags;
struct _derivation decryption;
struct _derivation application;
} derivation = { };
+ size_t nonce_size = 0;
if (conn == NULL) {
return NT_STATUS_INVALID_PARAMETER_MIX;
return NT_STATUS_INVALID_PARAMETER_MIX;
}
- no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
+ if (!conn->mandatory_signing) {
+ /*
+ * only allow guest sessions without
+ * mandatory signing.
+ *
+ * If we try an authentication with username != ""
+ * and the server let us in without verifying the
+ * password we don't have a negotiated session key
+ * for signing.
+ */
+ no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST;
+ }
if (session->smb2->session_flags & no_sign_flags) {
session->smb2->should_sign = false;
session->smb2->should_encrypt = false;
}
- generate_random_buffer((uint8_t *)&session->smb2->nonce_high,
- sizeof(session->smb2->nonce_high));
- session->smb2->nonce_low = 1;
+ /*
+ * CCM and GCM algorithms must never have their
+ * nonce wrap, or the security of the whole
+ * communication and the keys is destroyed.
+ * We must drop the connection once we have
+ * transferred too much data.
+ *
+ * NOTE: We assume nonces greater than 8 bytes.
+ */
+ generate_random_buffer((uint8_t *)&session->smb2->nonce_high_random,
+ sizeof(session->smb2->nonce_high_random));
+ switch (conn->smb2.server.cipher) {
+ case SMB2_ENCRYPTION_AES128_CCM:
+ nonce_size = AES_CCM_128_NONCE_SIZE;
+ break;
+ case SMB2_ENCRYPTION_AES128_GCM:
+ nonce_size = AES_GCM_128_IV_SIZE;
+ break;
+ default:
+ nonce_size = 0;
+ break;
+ }
+ session->smb2->nonce_high_max = SMB2_NONCE_HIGH_MAX(nonce_size);
+ session->smb2->nonce_high = 0;
+ session->smb2->nonce_low = 0;
return NT_STATUS_OK;
}
}
talloc_set_destructor(session2, smbXcli_session_destructor);
- DLIST_ADD_END(conn->sessions, session2, struct smbXcli_session *);
+ DLIST_ADD_END(conn->sessions, session2);
session2->conn = conn;
memcpy(session2->smb2_channel.preauth_sha512,
NTSTATUS smb2cli_session_encryption_on(struct smbXcli_session *session)
{
+ if (!session->smb2->should_sign) {
+ /*
+ * We need required signing on the session
+ * in order to prevent man in the middle attacks.
+ */
+ return NT_STATUS_INVALID_PARAMETER_MIX;
+ }
+
if (session->smb2->should_encrypt) {
return NT_STATUS_OK;
}
return tcon;
}
+/*
+ * Return a deep structure copy of a struct smbXcli_tcon *
+ *
+ * The copy is talloc'ed off mem_ctx; the SMB1 service and fs_type
+ * strings are duplicated onto the new tcon. Returns NULL on
+ * allocation failure (the partial copy is freed).
+ */
+
+struct smbXcli_tcon *smbXcli_tcon_copy(TALLOC_CTX *mem_ctx,
+ const struct smbXcli_tcon *tcon_in)
+{
+ struct smbXcli_tcon *tcon;
+
+ tcon = talloc_memdup(mem_ctx, tcon_in, sizeof(struct smbXcli_tcon));
+ if (tcon == NULL) {
+ return NULL;
+ }
+
+ /* Deal with the SMB1 strings. */
+ if (tcon_in->smb1.service != NULL) {
+ tcon->smb1.service = talloc_strdup(tcon, tcon_in->smb1.service);
+ if (tcon->smb1.service == NULL) {
+ TALLOC_FREE(tcon);
+ return NULL;
+ }
+ }
+ /*
+ * Test tcon_in (not the half-updated copy) for consistency with
+ * the service check above; talloc_memdup() copied the pointer, so
+ * the result is the same either way.
+ */
+ if (tcon_in->smb1.fs_type != NULL) {
+ tcon->smb1.fs_type = talloc_strdup(tcon, tcon_in->smb1.fs_type);
+ if (tcon->smb1.fs_type == NULL) {
+ TALLOC_FREE(tcon);
+ return NULL;
+ }
+ }
+ return tcon;
+}
+
void smbXcli_tcon_set_fs_attributes(struct smbXcli_tcon *tcon,
uint32_t fs_attributes)
{
return tcon->smb2.tcon_id;
}
+/* Override the SMB2 tree connect id of a tcon. */
+void smb2cli_tcon_set_id(struct smbXcli_tcon *tcon, uint32_t tcon_id)
+{
+ tcon->smb2.tcon_id = tcon_id;
+}
+
uint32_t smb2cli_tcon_capabilities(struct smbXcli_tcon *tcon)
{
return tcon->smb2.capabilities;
}
+/* SMB2 share flags of the tcon, as stored by smb2cli_tcon_set_values(). */
+uint32_t smb2cli_tcon_flags(struct smbXcli_tcon *tcon)
+{
+ return tcon->smb2.flags;
+}
+
void smb2cli_tcon_set_values(struct smbXcli_tcon *tcon,
struct smbXcli_session *session,
uint32_t tcon_id,
{
return tcon->smb2.should_encrypt;
}
+
+/* Set the connection's SMB2 message id (mid) counter. */
+void smb2cli_conn_set_mid(struct smbXcli_conn *conn, uint64_t mid)
+{
+ conn->smb2.mid = mid;
+}
+
+/* Current value of the connection's SMB2 message id (mid) counter. */
+uint64_t smb2cli_conn_get_mid(struct smbXcli_conn *conn)
+{
+ return conn->smb2.mid;
+}