/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "tls.h"
#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static struct workqueue_struct *destruct_wq __read_mostly;

static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

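/* tls_device_list holds the contexts currently offloaded to a device;
 * tls_device_down_list holds contexts whose device has already gone down.
 * Both lists, and moves between them, are protected by tls_device_lock.
 */
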
static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

static void tls_device_tx_del_task(struct work_struct *work)
{
	struct tls_offload_context_tx *offload_ctx =
		container_of(work, struct tls_offload_context_tx, destruct_work);
	struct tls_context *ctx = offload_ctx->ctx;
	struct net_device *netdev;

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
	dev_put(netdev);
	tls_device_free_ctx(ctx);
}

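/* Drop the final reference on the context. If the netdev is still set and
 * TX was offloaded, tls_dev_del() must be called, which the destructor
 * context may not allow, so the teardown is punted to destruct_wq;
 * otherwise the context is freed inline.
 */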
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	struct net_device *netdev;
	unsigned long flags;
	bool async_cleanup;

	spin_lock_irqsave(&tls_device_lock, flags);
	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
		spin_unlock_irqrestore(&tls_device_lock, flags);
		return;
	}

	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
	if (async_cleanup) {
		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);

		/* queue_work inside the spinlock
		 * to make sure tls_device_down waits for that work.
		 */
		queue_work(destruct_wq, &offload_ctx->destruct_work);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	if (!async_cleanup)
		tls_device_free_ctx(ctx);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

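/* When a driver loses sync with the TX record state it calls
 * tls_offload_tx_resync_request() above; the next record pushed then
 * reports its TCP sequence and record sequence number back to the
 * device here.
 */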
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
					size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
				prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

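/* Main TX loop: grow the open record from the iterator (copying the data,
 * or taking page references when MSG_SPLICE_PAGES is set), close the record
 * once it is full or the caller is done, and hand it to tls_push_record().
 */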
static int tls_push_data(struct sock *sk,
			 struct iov_iter *iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST |
	      MSG_SPLICE_PAGES))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;

		copy = min_t(size_t, size, max_open_record_len - record->len);
		if (copy && (flags & MSG_SPLICE_PAGES)) {
			struct page_frag zc_pfrag;
			struct page **pages = &zc_pfrag.page;
			size_t off;

			rc = iov_iter_extract_pages(iter, &pages,
						    copy, 1, 0, &off);
			if (rc <= 0) {
				if (rc == 0)
					rc = -EIO;
				goto handle_error;
			}
			copy = rc;

			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
				iov_iter_revert(iter, copy);
				rc = -EIO;
				goto handle_error;
			}

			zc_pfrag.offset = off;
			zc_pfrag.size = copy;
			tls_append_frag(record, &zc_pfrag, copy);
		} else if (copy) {
			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);

			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	if (!tls_ctx->zerocopy_sendfile)
		msg->msg_flags &= ~MSG_SPLICE_PAGES;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_process_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
			   record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

void tls_device_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter iter = {};

	if (!tls_is_partially_sent_record(tls_ctx))
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (tls_is_partially_sent_record(tls_ctx)) {
		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
	}

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

	if (flags & MSG_SENDPAGE_NOTLAST)
		msg.msg_flags |= MSG_MORE;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
	return tls_device_sendmsg(sk, &msg, size);
}

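/* Look up the record covering TCP sequence number @seq. Exported so that
 * drivers (and the SW fallback path) can recover record boundaries and
 * record sequence numbers when handling TCP retransmissions.
 */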
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 * And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

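/* RX side: tell the device the record sequence number expected for the
 * record starting at TCP sequence number @seq, so HW decryption can resume.
 */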
static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = rcu_dereference(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

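/* Async resync (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC) runs in two stages:
 * while the driver request is still marked async, record header sequence
 * numbers are only logged; once the driver posts the real request, the log
 * is matched against it and rcd_delta compensates for the records seen
 * in the meantime.
 */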
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

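/* A record the device decrypted only partially is repaired here. Running
 * the SW AEAD decryption over the mixed record flips each byte through the
 * GCM keystream, so segments the device already decrypted come out of the
 * copy as ciphertext again; writing those back over the decrypted fragments
 * leaves a fully-encrypted record for the regular SW RX path to handle.
 */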
static int
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	const struct tls_cipher_size_desc *cipher_sz;
	int err, offset, copy, data_len, pos;
	struct sk_buff *skb, *skb_iter;
	struct scatterlist sg[1];
	struct strp_msg *rxm;
	char *orig_buf, *buf;

	switch (tls_ctx->crypto_recv.info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
	case TLS_CIPHER_AES_GCM_256:
		break;
	default:
		return -EINVAL;
	}
	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];

	rxm = strp_msg(tls_strp_msg(sw_ctx));
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
			   sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	err = tls_strp_msg_cow(sw_ctx);
	if (unlikely(err))
		goto free_buf;

	skb = tls_strp_msg(sw_ctx);
	rxm = strp_msg(skb);
	offset = rxm->offset;

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - cipher_sz->tag;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb = tls_strp_msg(sw_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int is_decrypted, is_encrypted;

	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
		is_decrypted = skb->decrypted;
		is_encrypted = !is_decrypted;
	} else {
		is_decrypted = 0;
		is_encrypted = 0;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return is_decrypted;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, tls_ctx);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return is_decrypted;
	}

	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, tls_ctx);
}

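/* First-time attach: take ownership of sk_destruct and register the
 * context on tls_device_list so the NETDEV_DOWN handler can find it.
 */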
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		RCU_INIT_POINTER(ctx->netdev, netdev);
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

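/* TX offload setup: validate the cipher, populate the prot and offload
 * contexts, insert a zero-length start marker record at the current
 * write_seq, and install the context with the driver via tls_dev_add().
 */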
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	const struct tls_cipher_size_desc *cipher_sz;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	case TLS_CIPHER_AES_GCM_256:
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto release_netdev;
	}
	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];

	/* Sanity-check the rec_seq_size for stack allocations */
	if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto release_netdev;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
	prot->tag_size = cipher_sz->tag;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = cipher_sz->iv;
	prot->salt_size = cipher_sz->salt;
	ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);

	prot->rec_seq_size = cipher_sz->rec_seq;
	ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto free_rec_seq;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_offload_ctx;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
	offload_ctx->ctx = ctx;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_skb_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
release_netdev:
	dev_put(netdev);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		rcu_assign_pointer(tls_ctx->netdev, NULL);
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

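/* NETDEV_DOWN: tear all contexts on this device down to the SW fallback.
 * TX keeps flowing through tls_validate_xmit_skb_sw, RX resync is disabled,
 * and the driver-side state is released while the sockets stay usable.
 */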
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		struct net_device *ctx_netdev =
			rcu_dereference_protected(ctx->netdev,
						  lockdep_is_held(&device_offload_lock));

		if (ctx_netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_skb_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		rcu_assign_pointer(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 * Now release the ref taken above.
		 */
		if (refcount_dec_and_test(&ctx->refcount)) {
			/* sk_destruct ran after tls_device_down took a ref, and
			 * it returned early. Complete the destruction here.
			 */
			list_del(&ctx->list);
			tls_device_free_ctx(ctx);
		}
	}

	up_write(&device_offload_lock);

	flush_workqueue(destruct_wq);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (netif_is_bond_master(dev))
			return NOTIFY_DONE;
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

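/* For reference: the driver-side contract this notifier enforces, as a
 * minimal hypothetical sketch (the foo_* names are illustrative, not a
 * real driver). tls_dev_resync is mandatory when NETIF_F_HW_TLS_RX is
 * advertised, and tls_dev_add/tls_dev_del must both be provided:
 *
 *	static const struct tlsdev_ops foo_tlsdev_ops = {
 *		.tls_dev_add	= foo_tls_dev_add,
 *		.tls_dev_del	= foo_tls_dev_del,
 *		.tls_dev_resync	= foo_tls_dev_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &foo_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */
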
int __init tls_device_init(void)
{
	int err;

	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
	if (!destruct_wq)
		return -ENOMEM;

	err = register_netdevice_notifier(&tls_dev_notifier);
	if (err)
		destroy_workqueue(destruct_wq);

	return err;
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	destroy_workqueue(destruct_wq);
	clean_acked_data_flush();
}