bpf: Fix bug in mmap() implementation for BPF array map
author Andrii Nakryiko <andriin@fb.com>
Tue, 12 May 2020 23:59:25 +0000 (16:59 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Thu, 14 May 2020 19:40:04 +0000 (12:40 -0700)
The mmap() subsystem allows a user-space application to memory-map a region
with an initial page offset. This wasn't taken into account in the initial
implementation of BPF array memory-mapping, so the requested page shift was
ignored and the wrong pages were memory-mapped into user-space. This patch
fixes that gap and adds a test for this scenario.
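
For illustration, a minimal user-space sketch of the scenario this patch
addresses. It is only a sketch: map_fd is assumed to refer to a
BPF_MAP_TYPE_ARRAY created with the BPF_F_MMAPABLE flag, and the helper
name is made up for the example.

  #include <errno.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/mman.h>

  /* map_fd: fd of a BPF_MAP_TYPE_ARRAY created with BPF_F_MMAPABLE */
  static void mmap_array_at_offset(int map_fd)
  {
          long page_size = sysconf(_SC_PAGE_SIZE);
          void *p;

          /* Map one page of the array starting at the second page
           * (pg_off of one page). Before this fix the kernel ignored
           * the offset and mapped the wrong pages; with the fix the
           * offset is honored and requests extending past the map's
           * backing area fail with EINVAL.
           */
          p = mmap(NULL, page_size, PROT_READ, MAP_SHARED, map_fd,
                   page_size /* initial page offset */);
          if (p == MAP_FAILED)
                  fprintf(stderr, "mmap failed: %d\n", errno);
          else
                  munmap(p, page_size);
  }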

Fixes: fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20200512235925.3817805-1-andriin@fb.com
kernel/bpf/arraymap.c
tools/testing/selftests/bpf/prog_tests/mmap.c

index 95d77770353c9bb5af0311f5813471e11d70c86e..1d6120fd5ba687293cef2bda8902d9c97f64d22f 100644 (file)
@@ -486,7 +486,12 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
        if (!(map->map_flags & BPF_F_MMAPABLE))
                return -EINVAL;
 
-       return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff);
+       if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
+           PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
+               return -EINVAL;
+
+       return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
+                                  vma->vm_pgoff + pgoff);
 }
 
 const struct bpf_map_ops array_map_ops = {
index 56d80adcf4bdec8d074d1036a87333901e2bfa59..6b9dce431d4165425981adbfc65f1ca4fde19328 100644 (file)
@@ -217,6 +217,14 @@ void test_mmap(void)
 
        munmap(tmp2, 4 * page_size);
 
+       /* map all 4 pages, but with pg_off=1 page, should fail */
+       tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
+                   data_map_fd, page_size /* initial page shift */);
+       if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
+               munmap(tmp1, 4 * page_size);
+               goto cleanup;
+       }
+
        tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
        if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
                goto cleanup;