mm: remove vmalloc_user_node_flags
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 9c1cf7a87fb3488be59cc5ed543c58ae8e5512fd..42c7a42fc9c8ea8b5e8ac3b12b7fe982d5560a63 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -25,6 +25,7 @@
 #include <linux/nospec.h>
 #include <linux/audit.h>
 #include <uapi/linux/btf.h>
+#include <asm/pgtable.h>
 #include <linux/bpf_lsm.h>
 
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
@@ -281,26 +282,29 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
         * __GFP_RETRY_MAYFAIL to avoid such situations.
         */
 
-       const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
+       const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
+       unsigned int flags = 0;
+       unsigned long align = 1;
        void *area;
 
        if (size >= SIZE_MAX)
                return NULL;
 
        /* kmalloc()'ed memory can't be mmap()'ed */
-       if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-               area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
+       if (mmapable) {
+               BUG_ON(!PAGE_ALIGNED(size));
+               align = SHMLBA;
+               flags = VM_USERMAP;
+       } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+               area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
                                    numa_node);
                if (area != NULL)
                        return area;
        }
-       if (mmapable) {
-               BUG_ON(!PAGE_ALIGNED(size));
-               return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL |
-                                              __GFP_RETRY_MAYFAIL | flags);
-       }
-       return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_RETRY_MAYFAIL | flags,
-                             numa_node, __builtin_return_address(0));
+
+       return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+                       gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
+                       flags, numa_node, __builtin_return_address(0));
 }
 
 void *bpf_map_area_alloc(u64 size, int numa_node)
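For context, the removed helper was essentially a thin wrapper around __vmalloc_node_range(). The sketch below is an approximation of its pre-removal definition in mm/vmalloc.c (reconstructed from memory, not verbatim); it shows why the open-coded path above selects SHMLBA alignment and VM_USERMAP only for the mmap()'able case, while keeping __GFP_ZERO via the shared gfp mask.

/* Sketch of the removed helper (approximate, for illustration only):
 * a zeroed, node-aware vmalloc variant whose pages may later be mapped
 * into userspace, hence VM_USERMAP and SHMLBA alignment.
 */
void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    flags | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, node,
				    __builtin_return_address(0));
}

With the helper gone, __bpf_map_area_alloc() is its only former caller, and the single __vmalloc_node_range() call covers both the user-mappable and plain kernel allocation cases by varying align and flags.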