Linux cpanel.rrshost.in 5.15.0-25-generic #25-Ubuntu SMP Wed Mar 30 15:54:22 UTC 2022 x86_64
Apache
: 109.123.238.221 | : 108.162.241.156
128 Domain
8.2.28
aev999
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
HASH IDENTIFIER
README
+ Create Folder
+ Create File
/
usr /
src /
linux-headers-5.15.0-25 /
include /
net /
[ HOME SHELL ]
Name
Size
Permission
Action
9p
[ DIR ]
drwxr-xr-x
bluetooth
[ DIR ]
drwxr-xr-x
caif
[ DIR ]
drwxr-xr-x
iucv
[ DIR ]
drwxr-xr-x
netfilter
[ DIR ]
drwxr-xr-x
netns
[ DIR ]
drwxr-xr-x
nfc
[ DIR ]
drwxr-xr-x
phonet
[ DIR ]
drwxr-xr-x
sctp
[ DIR ]
drwxr-xr-x
tc_act
[ DIR ]
drwxr-xr-x
6lowpan.h
10.03
KB
-rw-r--r--
Space.h
737
B
-rw-r--r--
act_api.h
8.42
KB
-rw-r--r--
addrconf.h
13.95
KB
-rw-r--r--
af_ieee802154.h
1.19
KB
-rw-r--r--
af_rxrpc.h
2.73
KB
-rw-r--r--
af_unix.h
2.92
KB
-rw-r--r--
af_vsock.h
7.37
KB
-rw-r--r--
ah.h
382
B
-rw-r--r--
arp.h
2.03
KB
-rw-r--r--
atmclip.h
1.48
KB
-rw-r--r--
ax25.h
14.84
KB
-rw-r--r--
ax88796.h
1.41
KB
-rw-r--r--
bareudp.h
572
B
-rw-r--r--
bond_3ad.h
9.49
KB
-rw-r--r--
bond_alb.h
6.09
KB
-rw-r--r--
bond_options.h
3.76
KB
-rw-r--r--
bonding.h
20.18
KB
-rw-r--r--
bpf_sk_storage.h
1.74
KB
-rw-r--r--
busy_poll.h
3.85
KB
-rw-r--r--
calipso.h
1.55
KB
-rw-r--r--
cfg80211-wext.h
1.81
KB
-rw-r--r--
cfg80211.h
291.42
KB
-rw-r--r--
cfg802154.h
10.5
KB
-rw-r--r--
checksum.h
4.74
KB
-rw-r--r--
cipso_ipv4.h
7.37
KB
-rw-r--r--
cls_cgroup.h
2.04
KB
-rw-r--r--
codel.h
5.65
KB
-rw-r--r--
codel_impl.h
7.98
KB
-rw-r--r--
codel_qdisc.h
2.9
KB
-rw-r--r--
compat.h
2.53
KB
-rw-r--r--
datalink.h
619
B
-rw-r--r--
dcbevent.h
742
B
-rw-r--r--
dcbnl.h
4.22
KB
-rw-r--r--
devlink.h
62.1
KB
-rw-r--r--
dn.h
6.88
KB
-rw-r--r--
dn_dev.h
5.36
KB
-rw-r--r--
dn_fib.h
3.99
KB
-rw-r--r--
dn_neigh.h
968
B
-rw-r--r--
dn_nsp.h
5.39
KB
-rw-r--r--
dn_route.h
3.96
KB
-rw-r--r--
dsa.h
34.12
KB
-rw-r--r--
dsfield.h
1.12
KB
-rw-r--r--
dst.h
14.46
KB
-rw-r--r--
dst_cache.h
2.96
KB
-rw-r--r--
dst_metadata.h
5.39
KB
-rw-r--r--
dst_ops.h
2.07
KB
-rw-r--r--
erspan.h
8.96
KB
-rw-r--r--
esp.h
1.23
KB
-rw-r--r--
espintcp.h
966
B
-rw-r--r--
ethoc.h
391
B
-rw-r--r--
failover.h
1.15
KB
-rw-r--r--
fib_notifier.h
1.35
KB
-rw-r--r--
fib_rules.h
6.3
KB
-rw-r--r--
firewire.h
636
B
-rw-r--r--
flow.h
5.53
KB
-rw-r--r--
flow_dissector.h
9.89
KB
-rw-r--r--
flow_offload.h
15.34
KB
-rw-r--r--
fou.h
549
B
-rw-r--r--
fq.h
2.47
KB
-rw-r--r--
fq_impl.h
7.89
KB
-rw-r--r--
garp.h
2.62
KB
-rw-r--r--
gen_stats.h
2.82
KB
-rw-r--r--
genetlink.h
13.32
KB
-rw-r--r--
geneve.h
1.85
KB
-rw-r--r--
gre.h
3.29
KB
-rw-r--r--
gro.h
783
B
-rw-r--r--
gro_cells.h
443
B
-rw-r--r--
gtp.h
633
B
-rw-r--r--
gue.h
3.24
KB
-rw-r--r--
hwbm.h
969
B
-rw-r--r--
icmp.h
1.87
KB
-rw-r--r--
ieee80211_radiotap.h
13.2
KB
-rw-r--r--
ieee802154_netdev.h
8.75
KB
-rw-r--r--
if_inet6.h
6.3
KB
-rw-r--r--
ife.h
1.03
KB
-rw-r--r--
ila.h
291
B
-rw-r--r--
inet6_connection_sock.h
765
B
-rw-r--r--
inet6_hashtables.h
3.44
KB
-rw-r--r--
inet_common.h
2.55
KB
-rw-r--r--
inet_connection_sock.h
11.06
KB
-rw-r--r--
inet_ecn.h
7.45
KB
-rw-r--r--
inet_frag.h
4.93
KB
-rw-r--r--
inet_hashtables.h
13.99
KB
-rw-r--r--
inet_sock.h
9.25
KB
-rw-r--r--
inet_timewait_sock.h
3.65
KB
-rw-r--r--
inetpeer.h
3.29
KB
-rw-r--r--
ioam6.h
1.17
KB
-rw-r--r--
ip.h
22.08
KB
-rw-r--r--
ip6_checksum.h
2.71
KB
-rw-r--r--
ip6_fib.h
15.82
KB
-rw-r--r--
ip6_route.h
9.99
KB
-rw-r--r--
ip6_tunnel.h
4.97
KB
-rw-r--r--
ip_fib.h
15.86
KB
-rw-r--r--
ip_tunnels.h
14.14
KB
-rw-r--r--
ip_vs.h
49.38
KB
-rw-r--r--
ipcomp.h
659
B
-rw-r--r--
ipconfig.h
811
B
-rw-r--r--
ipv6.h
35.34
KB
-rw-r--r--
ipv6_frag.h
3.28
KB
-rw-r--r--
ipv6_stubs.h
3.23
KB
-rw-r--r--
iw_handler.h
20.91
KB
-rw-r--r--
kcm.h
4.82
KB
-rw-r--r--
l3mdev.h
6.99
KB
-rw-r--r--
lag.h
409
B
-rw-r--r--
lapb.h
4.81
KB
-rw-r--r--
lib80211.h
3.92
KB
-rw-r--r--
llc.h
4.41
KB
-rw-r--r--
llc_c_ac.h
9.31
KB
-rw-r--r--
llc_c_ev.h
10.68
KB
-rw-r--r--
llc_c_st.h
1.72
KB
-rw-r--r--
llc_conn.h
4.06
KB
-rw-r--r--
llc_if.h
2.16
KB
-rw-r--r--
llc_pdu.h
14.44
KB
-rw-r--r--
llc_s_ac.h
1.55
KB
-rw-r--r--
llc_s_ev.h
2.2
KB
-rw-r--r--
llc_s_st.h
947
B
-rw-r--r--
llc_sap.h
1.08
KB
-rw-r--r--
lwtunnel.h
6.49
KB
-rw-r--r--
mac80211.h
266.84
KB
-rw-r--r--
mac802154.h
14.88
KB
-rw-r--r--
macsec.h
6.9
KB
-rw-r--r--
mctp.h
6.01
KB
-rw-r--r--
mctpdevice.h
735
B
-rw-r--r--
mip6.h
1016
B
-rw-r--r--
mld.h
2.85
KB
-rw-r--r--
mpls.h
943
B
-rw-r--r--
mpls_iptunnel.h
429
B
-rw-r--r--
mptcp.h
6.34
KB
-rw-r--r--
mrp.h
3.03
KB
-rw-r--r--
ncsi.h
1.92
KB
-rw-r--r--
ndisc.h
14.79
KB
-rw-r--r--
neighbour.h
15.97
KB
-rw-r--r--
net_failover.h
1023
B
-rw-r--r--
net_namespace.h
11.87
KB
-rw-r--r--
net_ratelimit.h
220
B
-rw-r--r--
netevent.h
1.02
KB
-rw-r--r--
netlabel.h
20.19
KB
-rw-r--r--
netlink.h
60.21
KB
-rw-r--r--
netprio_cgroup.h
1.02
KB
-rw-r--r--
netrom.h
7.71
KB
-rw-r--r--
nexthop.h
12.27
KB
-rw-r--r--
nl802154.h
12.09
KB
-rw-r--r--
nsh.h
12.31
KB
-rw-r--r--
p8022.h
447
B
-rw-r--r--
page_pool.h
8.88
KB
-rw-r--r--
pie.h
3.6
KB
-rw-r--r--
ping.h
2.9
KB
-rw-r--r--
pkt_cls.h
23.86
KB
-rw-r--r--
pkt_sched.h
5.37
KB
-rw-r--r--
pptp.h
557
B
-rw-r--r--
protocol.h
4
KB
-rw-r--r--
psample.h
1.06
KB
-rw-r--r--
psnap.h
351
B
-rw-r--r--
raw.h
2.08
KB
-rw-r--r--
rawv6.h
854
B
-rw-r--r--
red.h
11.38
KB
-rw-r--r--
regulatory.h
10.65
KB
-rw-r--r--
request_sock.h
6.47
KB
-rw-r--r--
rose.h
7.62
KB
-rw-r--r--
route.h
11.24
KB
-rw-r--r--
rpl.h
972
B
-rw-r--r--
rsi_91x.h
1.67
KB
-rw-r--r--
rtnetlink.h
6.72
KB
-rw-r--r--
rtnh.h
859
B
-rw-r--r--
sch_generic.h
33.86
KB
-rw-r--r--
scm.h
3.58
KB
-rw-r--r--
secure_seq.h
855
B
-rw-r--r--
seg6.h
2.18
KB
-rw-r--r--
seg6_hmac.h
1.44
KB
-rw-r--r--
seg6_local.h
644
B
-rw-r--r--
selftests.h
582
B
-rw-r--r--
slhc_vj.h
6.67
KB
-rw-r--r--
smc.h
2.45
KB
-rw-r--r--
snmp.h
5.14
KB
-rw-r--r--
sock.h
79.96
KB
-rw-r--r--
sock_reuseport.h
1.74
KB
-rw-r--r--
stp.h
383
B
-rw-r--r--
strparser.h
4.1
KB
-rw-r--r--
switchdev.h
13.96
KB
-rw-r--r--
tcp.h
71.78
KB
-rw-r--r--
tcp_states.h
1.2
KB
-rw-r--r--
timewait_sock.h
925
B
-rw-r--r--
tipc.h
2.35
KB
-rw-r--r--
tls.h
20.91
KB
-rw-r--r--
tls_toe.h
2.94
KB
-rw-r--r--
transp_v6.h
1.95
KB
-rw-r--r--
tso.h
566
B
-rw-r--r--
tun_proto.h
988
B
-rw-r--r--
udp.h
16.42
KB
-rw-r--r--
udp_tunnel.h
11.75
KB
-rw-r--r--
udplite.h
3.83
KB
-rw-r--r--
vsock_addr.h
662
B
-rw-r--r--
vxlan.h
13.91
KB
-rw-r--r--
wext.h
1.47
KB
-rw-r--r--
x25.h
9.49
KB
-rw-r--r--
x25device.h
387
B
-rw-r--r--
xdp.h
8.48
KB
-rw-r--r--
xdp_priv.h
446
B
-rw-r--r--
xdp_sock.h
2.04
KB
-rw-r--r--
xdp_sock_drv.h
5.53
KB
-rw-r--r--
xfrm.h
55.02
KB
-rw-r--r--
xsk_buff_pool.h
4.98
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : page_pool.h
/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one-frame-per-page, but has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users know
 * when it is safe to deallocate a page_pool object.  Thus, API users
 * must make sure to call page_pool_release_page() when a page is
 * "leaving" the page_pool.  Or call page_pool_put_page() where
 * appropriate, for maintaining correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of an elevated refcnt, it
 * will release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* device driver responsibility
					*/
#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV |\
				 PP_FLAG_PAGE_FRAG)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark is related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects, is due to the XDP_DROP use-case.  As
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, as it shares the same softirq/NAPI protection.  If
 * cache is already full (or partly full) then the XDP_DROP recycles
 * would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* Numa node id to allocate from pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
};

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;
	unsigned int frag_offset;
	struct page *frag_page;
	long frag_users;

	/*
	 * Data structure for allocation side
	 *
	 * Drivers allocation side usually already perform some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require driver to protect allocation side.
	 *
	 * For NIC drivers this means, allocate a page_pool per
	 * RX-queue.  As the RX-queue is already protected by
	 * Softirq/BH scheduling and napi_schedule.  NAPI schedule
	 * guarantee that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because free's can happen on remote CPUs, with no
	 * association with allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to above pp_alloc_cache.  This
	 * refcnt serves purpose is to simplify drivers error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}

/* get the stored dma direction.  A driver might decide to treat this locally and
 * avoid the extra cache line from page_pool to determine the direction
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}

bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *))
{
}
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}

/* Same as above but the caller must guarantee safe context.  e.g NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr;
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		page->dma_addr_upper = upper_32_bits(addr);
}

static inline void page_pool_set_frag_count(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_atomic_sub_frag_count_return(struct page *page,
							  long nr)
{
	long ret;

	/* As suggested by Alexander, atomic_long_read() may cover up the
	 * reference count errors, so avoid calling atomic_long_read() in
	 * the cases of freeing or draining the page_frags, where we would
	 * not expect it to match or that are slowpath anyway.
	 */
	if (__builtin_constant_p(nr) &&
	    atomic_long_read(&page->pp_frag_count) == nr)
		return 0;

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);
	return ret;
}

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

static inline void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

#endif /* _NET_PAGE_POOL_H */
Close