Merge branch 'skb-frag-kmap_atomic-fixes'
author	Jakub Kicinski <kuba@kernel.org>
	Tue, 12 Jan 2021 02:20:12 +0000 (18:20 -0800)
committer	Jakub Kicinski <kuba@kernel.org>
	Tue, 12 Jan 2021 02:20:12 +0000 (18:20 -0800)
Willem de Bruijn says:

====================
skb frag: kmap_atomic fixes

skb frags may be backed by highmem and/or compound pages. Various
code calls kmap_atomic to safely access highmem pages. But this
needs additional care for compound pages. Fix a few issues:

patch 1 expects kmap mappings with CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
patch 2 fixes kmap_atomic + compound page support in skb_seq_read
patch 3 fixes kmap_atomic + compound page support in esp
====================

Link: https://lore.kernel.org/r/20210109221834.3459768-1-willemdebruijn.kernel@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
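
[ For reference, the per-sub-page access pattern that patch 2 applies to
  skb_seq_read() can be sketched in isolation. The helper below,
  frag_copy_to_buf(), is hypothetical and not part of this series; it only
  illustrates why a compound, possibly-highmem frag has to be mapped one
  PAGE_SIZE chunk at a time, since kmap_atomic() maps exactly one page:

  #include <linux/skbuff.h>
  #include <linux/highmem.h>
  #include <linux/string.h>

  /* Hypothetical helper, for illustration only: copy a whole skb frag
   * into a linear buffer. The frag may point into a compound page and
   * may live in highmem, so map and copy at most one sub-page per
   * kmap_atomic() call.
   */
  static void frag_copy_to_buf(const skb_frag_t *frag, u8 *buf)
  {
  	struct page *page = skb_frag_page(frag);
  	unsigned int off = skb_frag_off(frag);
  	unsigned int len = skb_frag_size(frag);
  	unsigned int copied = 0;

  	while (copied < len) {
  		unsigned int pg_idx = (off + copied) >> PAGE_SHIFT;
  		unsigned int pg_off = offset_in_page(off + copied);
  		unsigned int chunk = min_t(unsigned int, len - copied,
  					   PAGE_SIZE - pg_off);
  		u8 *vaddr = kmap_atomic(page + pg_idx);

  		memcpy(buf + copied, vaddr + pg_off, chunk);
  		kunmap_atomic(vaddr);
  		copied += chunk;
  	}
  }

  When skb_frag_must_loop() returns false the frag can instead be reached
  through a single mapping; that is the shortcut the pre-patch code took,
  and which patch 1 also exercises for lowmem pages when
  CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is enabled. -- ed. ]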
include/linux/skbuff.h
net/core/skbuff.c
net/ipv4/esp4.c
net/ipv6/esp6.c

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 333bcdc39635bcfd2d2682891831bae613c6fa5e..5f60c9e907c9d8eae1e85ae0329838383e3325df 100644
@@ -366,7 +366,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 static inline bool skb_frag_must_loop(struct page *p)
 {
 #if defined(CONFIG_HIGHMEM)
-       if (PageHighMem(p))
+       if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
                return true;
 #endif
        return false;
@@ -1203,6 +1203,7 @@ struct skb_seq_state {
        struct sk_buff  *root_skb;
        struct sk_buff  *cur_skb;
        __u8            *frag_data;
+       __u32           frag_off;
 };
 
 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6f2b520a9b747b679c12c59f710b50f29ef1d36..0da035c1e53f1c07eb3a4faedc72660654cfa8e4 100644
@@ -3442,6 +3442,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
        st->root_skb = st->cur_skb = skb;
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
+       st->frag_off = 0;
 }
 EXPORT_SYMBOL(skb_prepare_seq_read);
 
@@ -3496,14 +3497,27 @@ next_skb:
                st->stepped_offset += skb_headlen(st->cur_skb);
 
        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
+               unsigned int pg_idx, pg_off, pg_sz;
+
                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-               block_limit = skb_frag_size(frag) + st->stepped_offset;
 
+               pg_idx = 0;
+               pg_off = skb_frag_off(frag);
+               pg_sz = skb_frag_size(frag);
+
+               if (skb_frag_must_loop(skb_frag_page(frag))) {
+                       pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
+                       pg_off = offset_in_page(pg_off + st->frag_off);
+                       pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
+                                                   PAGE_SIZE - pg_off);
+               }
+
+               block_limit = pg_sz + st->stepped_offset;
                if (abs_offset < block_limit) {
                        if (!st->frag_data)
-                               st->frag_data = kmap_atomic(skb_frag_page(frag));
+                               st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
 
-                       *data = (u8 *) st->frag_data + skb_frag_off(frag) +
+                       *data = (u8 *)st->frag_data + pg_off +
                                (abs_offset - st->stepped_offset);
 
                        return block_limit - abs_offset;
@@ -3514,8 +3528,12 @@ next_skb:
                        st->frag_data = NULL;
                }
 
-               st->frag_idx++;
-               st->stepped_offset += skb_frag_size(frag);
+               st->stepped_offset += pg_sz;
+               st->frag_off += pg_sz;
+               if (st->frag_off == skb_frag_size(frag)) {
+                       st->frag_off = 0;
+                       st->frag_idx++;
+               }
        }
 
        if (st->frag_data) {
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 8b07f3a4f2db2509bed581ab549c09b621c8b7ea..a3271ec3e1627fb4f6e29da0e0fb1a638fe7e789 100644
@@ -443,7 +443,6 @@ static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
        u8 *tail;
-       u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
@@ -485,14 +484,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        page = pfrag->page;
                        get_page(page);
 
-                       vaddr = kmap_atomic(page);
-
-                       tail = vaddr + pfrag->offset;
+                       tail = page_address(page) + pfrag->offset;
 
                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 
-                       kunmap_atomic(vaddr);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 52c2f063529fbf9e33326eb6d3887f8c810b26eb..2b804fcebcc6511d32f18d99da596f9221e98551 100644
@@ -478,7 +478,6 @@ static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
        u8 *tail;
-       u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
@@ -519,14 +518,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        page = pfrag->page;
                        get_page(page);
 
-                       vaddr = kmap_atomic(page);
-
-                       tail = vaddr + pfrag->offset;
+                       tail = page_address(page) + pfrag->offset;
 
                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 
-                       kunmap_atomic(vaddr);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,