Linux cpanel.rrshost.in 5.15.0-25-generic #25-Ubuntu SMP Wed Mar 30 15:54:22 UTC 2022 x86_64
Web server: Apache
Server IP: 109.123.238.221 | Your IP: 172.69.6.51
Domains: 128
PHP: 8.2.28
User: aev999
Directory: /usr/src/linux-headers-5.15.0-25/include/linux/
Directories (all drwxr-xr-x):
amba, atomic, avf, bcma, byteorder, can, ceph, clk, crush, decompress,
device, dma, dsa, extcon, firmware, fpga, fsl, gpio, greybus, hsi, i3c,
iio, input, irqchip, isdn, lockd, mailbox, mdio, mfd, mlx4, mlx5, mmc,
mtd, mux, net, netfilter, netfilter_arp, netfilter_bridge, netfilter_ipv4,
netfilter_ipv6, pcs, perf, phy, pinctrl, platform_data, power, qed, raid,
regulator, remoteproc, reset, rpmsg, rtc, sched, soc, soundwire, spi, ssb,
sunrpc, surface_aggregator, ulpi, unaligned, usb

Files (all -rw-r--r--), name and size:
8250_pci.h  1.01 KB
a.out.h  354 B
acct.h  2.49 KB
acpi.h  39.5 KB
acpi_dma.h  3.08 KB
acpi_iort.h  2.27 KB
acpi_mdio.h  631 B
acpi_pmtmr.h  674 B
acpi_viot.h  389 B
adb.h  1.79 KB
adfs_fs.h  574 B
adreno-smmu-priv.h  2.74 KB
adxl.h  310 B
aer.h  1.78 KB
agp_backend.h  3.45 KB
agpgart.h  3.79 KB
ahci-remap.h  607 B
ahci_platform.h  1.54 KB
aio.h  651 B
alarmtimer.h  1.94 KB
alcor_pci.h  8.83 KB
align.h  552 B
altera_jtaguart.h  379 B
altera_uart.h  397 B
amd-iommu.h  6.18 KB
anon_inodes.h  670 B
apm-emulation.h  1.56 KB
apm_bios.h  2.25 KB
apple-gmux.h  918 B
apple_bl.h  498 B
arch_topology.h  2.49 KB
arm-cci.h  1.36 KB
arm-smccc.h  17.7 KB
arm_ffa.h  7.36 KB
arm_sdei.h  2.51 KB
armada-37xx-rwtm-mailbox.h  431 B
ascii85.h  555 B
asn1.h  1.79 KB
asn1_ber_bytecode.h  2.52 KB
asn1_decoder.h  468 B
asn1_encoder.h  1006 B
assoc_array.h  2.88 KB
assoc_array_priv.h  5.3 KB
async.h  4.26 KB
async_tx.h  6.7 KB
ata.h  32.52 KB
ata_platform.h  749 B
atalk.h  4.48 KB
ath9k_platform.h  1.44 KB
atm.h  287 B
atm_tcp.h  511 B
atmdev.h  9.99 KB
atmel-isc-media.h  2.07 KB
atmel-mci.h  1.4 KB
atmel-ssc.h  9.74 KB
atmel_pdc.h  1.26 KB
atomic.h  2.58 KB
attribute_container.h  2.74 KB
audit.h  19.45 KB
auto_dev-ioctl.h  296 B
auto_fs.h  278 B
auxiliary_bus.h  2.48 KB
auxvec.h  304 B
average.h  2.42 KB
backing-dev-defs.h  8.55 KB
backing-dev.h  11.81 KB
backlight.h  13.13 KB
badblocks.h  2.14 KB
balloon_compaction.h  6.59 KB
bcd.h  559 B
bch.h  2.1 KB
bcm47xx_nvram.h  1.01 KB
bcm47xx_sprom.h  616 B
bcm47xx_wdt.h  555 B
bcm963xx_nvram.h  2.96 KB
bcm963xx_tag.h  3.6 KB
binfmts.h  4.48 KB
bio.h  19.64 KB
bit_spinlock.h  2.3 KB
bitfield.h  4.75 KB
bitmap.h  21.62 KB
bitops.h  8.53 KB
bitrev.h  2.53 KB
bits.h  1.35 KB
blk-cgroup.h  21.4 KB
blk-crypto.h  4.02 KB
blk-mq-pci.h  269 B
blk-mq-rdma.h  273 B
blk-mq-virtio.h  293 B
blk-mq.h  19.37 KB
blk-pm.h  708 B
blk_types.h  15.42 KB
blkdev.h  56.35 KB
blkpg.h  436 B
blktrace_api.h  3.77 KB
blockgroup_lock.h  810 B
bma150.h  1.26 KB
bootconfig.h  8.51 KB
bootmem_info.h  1.61 KB
bottom_half.h  974 B
bpf-cgroup.h  18.52 KB
bpf-netns.h  1.52 KB
bpf.h  69.46 KB
bpf_lirc.h  698 B
bpf_local_storage.h  5.23 KB
bpf_lsm.h  1.42 KB
bpf_trace.h  166 B
bpf_types.h  5.41 KB
bpf_verifier.h  18.45 KB
bpfilter.h  791 B
bpfptr.h  1.83 KB
brcmphy.h  13.67 KB
bsearch.h  624 B
bsg-lib.h  1.72 KB
bsg.h  492 B
btf.h  7.26 KB
btf_ids.h  5.32 KB
btree-128.h  2.67 KB
btree-type.h  3.9 KB
btree.h  6.84 KB
btrfs.h  145 B
buffer_head.h  13.54 KB
bug.h  2.17 KB
build-salt.h  375 B
build_bug.h  2.76 KB
buildid.h  561 B
bvec.h  6.17 KB
c2port.h  1.35 KB
cache.h  2.55 KB
cacheinfo.h  3.75 KB
capability.h  8.29 KB
cb710.h  5.36 KB
cc_platform.h  2.32 KB
cciss_ioctl.h  1.03 KB
ccp.h  18.14 KB
cdev.h  845 B
cdrom.h  8.98 KB
cfag12864b.h  1.47 KB
cfi.h  1.13 KB
cgroup-defs.h  24.1 KB
cgroup.h  28.19 KB
cgroup_rdma.h  1.18 KB
cgroup_subsys.h  1.23 KB
circ_buf.h  1.09 KB
cleancache.h  3.89 KB
clk-provider.h  56.38 KB
clk.h  29.59 KB
clkdev.h  1.29 KB
clockchips.h  7.27 KB
clocksource.h  9.57 KB
clocksource_ids.h  247 B
cm4000_cs.h  199 B
cma.h  1.59 KB
cn_proc.h  1.85 KB
cnt32_to_63.h  3.46 KB
coda.h  2.16 KB
compaction.h  7.15 KB
compat.h  31.35 KB
compiler-clang.h  2.27 KB
compiler-gcc.h  4.44 KB
compiler-intel.h  949 B
compiler-version.h  517 B
compiler.h  7.78 KB
compiler_attributes.h  12.15 KB
compiler_types.h  10.46 KB
completion.h  4 KB
component.h  4.07 KB
configfs.h  8.48 KB
connector.h  3.83 KB
console.h  7.28 KB
console_struct.h  7.08 KB
consolemap.h  1.05 KB
const.h  421 B
container.h  610 B
context_tracking.h  3.28 KB
context_tracking_state.h  1.58 KB
cookie.h  1.22 KB
cordic.h  2.08 KB
coredump.h  1.23 KB
coresight-pmu.h  1.32 KB
coresight-stm.h  152 B
coresight.h  17.81 KB
count_zeros.h  1.42 KB
counter.h  15.13 KB
counter_enum.h  1.43 KB
cper.h  16.41 KB
cpu.h  7.31 KB
cpu_cooling.h  1.86 KB
cpu_pm.h  2.38 KB
cpu_rmap.h  1.68 KB
cpufeature.h  1.71 KB
cpufreq.h  32.57 KB
cpuhotplug.h  16.47 KB
cpuidle.h  10.27 KB
cpuidle_haltpoll.h  312 B
cpumask.h  28.61 KB
cpuset.h  7.34 KB
crash_core.h  3.38 KB
crash_dump.h  4.05 KB
crc-ccitt.h  609 B
crc-itu-t.h  531 B
crc-t10dif.h  453 B
crc16.h  540 B
crc32.h  2.83 KB
crc32c.h  331 B
crc32poly.h  610 B
crc4.h  192 B
crc64.h  280 B
crc7.h  316 B
crc8.h  3.66 KB
cred.h  12.48 KB
crypto.h  27.08 KB
cs5535.h  6.13 KB
ctype.h  1.87 KB
cuda.h  613 B
damon.h  10.35 KB
dasd_mod.h  204 B
davinci_emac.h  1.05 KB
dax.h  6.56 KB
dca.h  1.88 KB
dcache.h  18.63 KB
dccp.h  10.73 KB
debug_locks.h  1.59 KB
debugfs.h  11.22 KB
debugobjects.h  3.99 KB
delay.h  2.43 KB
delayacct.h  5.45 KB
delayed_call.h  709 B
dev_printk.h  9.05 KB
devcoredump.h  2.21 KB
devfreq-event.h  5.95 KB
devfreq.h  13.98 KB
devfreq_cooling.h  2.7 KB
device-mapper.h  18.92 KB
device.h  32.89 KB
device_cgroup.h  1.57 KB
devm-helpers.h  2.68 KB
devpts_fs.h  1.13 KB
dfl.h  2.38 KB
digsig.h  1.18 KB
dim.h  9.13 KB
dio.h  10.76 KB
dirent.h  215 B
dlm.h  5.86 KB
dlm_plock.h  532 B
dm-bufio.h  4.88 KB
dm-dirty-log.h  3.94 KB
dm-io.h  1.93 KB
dm-kcopyd.h  2.94 KB
dm-region-hash.h  3.11 KB
dm9000.h  987 B
dma-buf-map.h  8.12 KB
dma-buf.h  20.47 KB
dma-direct.h  3.59 KB
dma-direction.h  407 B
dma-fence-array.h  2.14 KB
dma-fence-chain.h  3.07 KB
dma-fence.h  20.25 KB
dma-heap.h  1.58 KB
dma-iommu.h  2.21 KB
dma-map-ops.h  12.73 KB
dma-mapping.h  19.81 KB
dma-resv.h  9.05 KB
dmaengine.h  53.53 KB
dmapool.h  1.79 KB
dmar.h  7.74 KB
dmi.h  4.27 KB
dnotify.h  1.02 KB
dns_resolver.h  1.36 KB
dqblk_qtree.h  2.19 KB
dqblk_v1.h  327 B
dqblk_v2.h  406 B
drbd.h  10.07 KB
drbd_genl.h  21.49 KB
drbd_genl_api.h  1.77 KB
drbd_limits.h  7.82 KB
ds2782_battery.h  158 B
dtlk.h  3.5 KB
dtpm.h  1.64 KB
dw_apb_timer.h  1.53 KB
dynamic_debug.h  6.6 KB
dynamic_queue_limits.h  3.72 KB
earlycpio.h  359 B
ecryptfs.h  3.82 KB
edac.h  19.46 KB
edd.h  1.05 KB
eeprom_93cx6.h  2.31 KB
eeprom_93xx46.h  1.04 KB
efi-bgrt.h  644 B
efi.h  42.25 KB
efi_embedded_fw.h  1.04 KB
efs_vh.h  1.55 KB
eisa.h  2.96 KB
elevator.h  5.52 KB
elf-fdpic.h  1.98 KB
elf-randomize.h  583 B
elf.h  2.96 KB
elfcore-compat.h  1.27 KB
elfcore.h  4.11 KB
elfnote-lto.h  314 B
elfnote.h  3.54 KB
enclosure.h  4.02 KB
energy_model.h  8.17 KB
entry-common.h  16.95 KB
entry-kvm.h  2.75 KB
err.h  1.49 KB
errname.h  283 B
errno.h  1.45 KB
error-injection.h  607 B
errqueue.h  524 B
errseq.h  382 B
etherdevice.h  16.41 KB
ethtool.h  31.72 KB
ethtool_netlink.h  1.98 KB
eventfd.h  2.1 KB
eventpoll.h  2.4 KB
evm.h  3.42 KB
export.h  5.24 KB
exportfs.h  8.25 KB
ext2_fs.h  967 B
extable.h  1.28 KB
extcon-provider.h  3.83 KB
extcon.h  10.17 KB
f2fs_fs.h  19 KB
f75375s.h  541 B
falloc.h  1.59 KB
fanotify.h  3.85 KB
fault-inject-usercopy.h  496 B
fault-inject.h  1.93 KB
fb.h  27.83 KB
fbcon.h  1.88 KB
fcdevice.h  774 B
fcntl.h  1.66 KB
fd.h  490 B
fddidevice.h  833 B
fdtable.h  3.58 KB
fec.h  463 B
fiemap.h  713 B
file.h  2.89 KB
fileattr.h  1.91 KB
filter.h  42.08 KB
fips.h  303 B
firewire.h  13.4 KB
firmware-map.h  959 B
firmware.h  3.27 KB
fixp-arith.h  4.19 KB
flat.h  2.57 KB
flex_proportions.h  2.81 KB
font.h  1.67 KB
fortify-string.h  9.11 KB
freelist.h  3.8 KB
freezer.h  8.98 KB
frontswap.h  3.04 KB
fs.h  120.45 KB
fs_context.h  8.68 KB
fs_enet_pd.h  3.38 KB
fs_parser.h  4.44 KB
fs_pin.h  539 B
fs_stack.h  811 B
fs_struct.h  1.04 KB
fs_types.h  2.02 KB
fs_uart_pd.h  1.49 KB
fscache-cache.h  18.58 KB
fscache.h  29.59 KB
fscrypt.h  29.89 KB
fsi-occ.h  681 B
fsi-sbefifo.h  629 B
fsi.h  2.26 KB
fsl-diu-fb.h  3.87 KB
fsl_devices.h  4.16 KB
fsl_hypervisor.h  2.76 KB
fsl_ifc.h  24.48 KB
fsldma.h  202 B
fsnotify.h  8.85 KB
fsnotify_backend.h  22.93 KB
fsverity.h  6.75 KB
ftrace.h  34.75 KB
ftrace_irq.h  884 B
futex.h  2.44 KB
fwnode.h  6.8 KB
fwnode_mdio.h  858 B
gameport.h  5.42 KB
gcd.h  193 B
genalloc.h  7.68 KB
generic-radix-tree.h  6.35 KB
genetlink.h  589 B
genhd.h  9.23 KB
genl_magic_func.h  11.53 KB
genl_magic_struct.h  7.61 KB
getcpu.h  641 B
gfp.h  26.01 KB
glob.h  256 B
gnss.h  1.56 KB
goldfish.h  762 B
gpio-pxa.h  571 B
gpio.h  5.3 KB
gpio_keys.h  1.74 KB
greybus.h  4.14 KB
hardirq.h  3.43 KB
hash.h  3 KB
hashtable.h  6.67 KB
hdlc.h  3.19 KB
hdlcdrv.h  6.32 KB
hdmi.h  12.41 KB
hid-debug.h  1.42 KB
hid-roccat.h  484 B
hid-sensor-hub.h  8.97 KB
hid-sensor-ids.h  7.44 KB
hid.h  37.67 KB
hidden.h  966 B
hiddev.h  1.42 KB
hidraw.h  1.12 KB
highmem-internal.h  5.06 KB
highmem.h  8.95 KB
highuid.h  3.12 KB
hil.h  18.42 KB
hil_mlc.h  5.13 KB
hippidevice.h  1.03 KB
hmm.h  4.07 KB
host1x.h  10.74 KB
hp_sdc.h  14.02 KB
hpet.h  2.55 KB
hrtimer.h  16.01 KB
hrtimer_defs.h  660 B
htcpld.h  617 B
huge_mm.h  13.27 KB
hugetlb.h  29.09 KB
hugetlb_cgroup.h  7.07 KB
hugetlb_inline.h  374 B
hw_breakpoint.h  4.15 KB
hw_random.h  2.24 KB
hwmon-sysfs.h  2.77 KB
hwmon-vid.h  862 B
hwmon.h  14.78 KB
hwspinlock.h  15.64 KB
hyperv.h  47.93 KB
hypervisor.h  608 B
i2c-algo-bit.h  1.4 KB
i2c-algo-pca.h  2.89 KB
i2c-algo-pcf.h  1.21 KB
i2c-dev.h  371 B
i2c-mux.h  1.63 KB
i2c-smbus.h  1.73 KB
i2c.h  37.74 KB
i8042.h  1.99 KB
i8253.h  809 B
icmp.h  1 KB
icmpv6.h  2.7 KB
idle_inject.h  932 B
idr.h  9.58 KB
ieee80211.h  123.96 KB
ieee802154.h  11.12 KB
if_arp.h  1.63 KB
if_bridge.h  4.9 KB
if_eql.h  1.07 KB
if_ether.h  1.26 KB
if_fddi.h  3.24 KB
if_hsr.h  586 B
if_link.h  592 B
if_ltalk.h  188 B
if_macvlan.h  2.69 KB
if_phonet.h  319 B
if_pppol2tp.h  516 B
if_pppox.h  2.84 KB
if_rmnet.h  2.04 KB
if_tap.h  2.21 KB
if_team.h  7.84 KB
if_tun.h  1.55 KB
if_tunnel.h  409 B
if_vlan.h  20.37 KB
igmp.h  4.33 KB
ihex.h  2.15 KB
ima.h  5.7 KB
imx-media.h  605 B
in.h  2.29 KB
in6.h  1.63 KB
indirect_call_wrapper.h  2.23 KB
inet.h  2.64 KB
inet_diag.h  2.74 KB
inetdevice.h  8.87 KB
init.h  11.39 KB
init_ohci1394_dma.h  196 B
init_syscalls.h  1.01 KB
init_task.h  1.3 KB
initrd.h  1.02 KB
inotify.h  713 B
input.h  19.88 KB
instrumentation.h  1.93 KB
instrumented.h  3.6 KB
integrity.h  1.15 KB
intel-iommu.h  27.48 KB
intel-ish-client-if.h  3.97 KB
intel-svm.h  1.54 KB
intel_rapl.h  4.14 KB
intel_th.h  2.44 KB
interconnect-provider.h  5.51 KB
interconnect.h  2.88 KB
interrupt.h  23.81 KB
interval_tree.h  831 B
interval_tree_generic.h  6.7 KB
io-64-nonatomic-hi-lo.h  2.41 KB
io-64-nonatomic-lo-hi.h  2.41 KB
io-mapping.h  4.65 KB
io-pgtable.h  8.39 KB
io.h  5.02 KB
io_uring.h  902 B
ioam6.h  226 B
ioam6_genl.h  266 B
ioam6_iptunnel.h  285 B
ioasid.h  2.07 KB
iocontext.h  4.59 KB
iomap.h  11.67 KB
iommu-helper.h  1.12 KB
iommu.h  34.04 KB
iopoll.h  7.58 KB
ioport.h  12.16 KB
ioprio.h  1.74 KB
iova.h  6.15 KB
ip.h  1 KB
ipack.h  8.69 KB
ipc.h  613 B
ipc_namespace.h  4.98 KB
ipmi.h  10.88 KB
ipmi_smi.h  7.98 KB
ipv6.h  8.67 KB
ipv6_route.h  372 B
irq.h  41.58 KB
irq_poll.h  575 B
irq_sim.h  789 B
irq_work.h  1.79 KB
irqbypass.h  3.45 KB
irqchip.h  2.33 KB
irqdesc.h  7.75 KB
irqdomain.h  19.98 KB
irqflags.h  7.7 KB
irqhandler.h  300 B
irqnr.h  856 B
irqreturn.h  503 B
isa.h  2.12 KB
isapnp.h  2.88 KB
iscsi_boot_sysfs.h  3.71 KB
iscsi_ibft.h  847 B
iversion.h  12.37 KB
jbd2.h  51.42 KB
jhash.h  4.56 KB
jiffies.h  15.07 KB
journal-head.h  2.91 KB
joystick.h  437 B
jump_label.h  16.04 KB
jump_label_ratelimit.h  2.77 KB
jz4740-adc.h  1023 B
jz4780-nemc.h  976 B
kallsyms.h  4.5 KB
kasan-checks.h  1.46 KB
kasan-tags.h  487 B
kasan.h  13.31 KB
kbd_diacr.h  198 B
kbd_kern.h  3.6 KB
kbuild.h  380 B
kconfig.h  2.63 KB
kcore.h  891 B
kcov.h  2.41 KB
kcsan-checks.h  15.86 KB
kcsan.h  1.88 KB
kdb.h  7.33 KB
kdebug.h  487 B
kdev_t.h  1.8 KB
kern_levels.h  1.57 KB
kernel-page-flags.h  528 B
kernel.h  17.14 KB
kernel_read_file.h  1.64 KB
kernel_stat.h  2.97 KB
kernelcapi.h  1.45 KB
kernfs.h  18.11 KB
kexec.h  11.37 KB
key-type.h  6.53 KB
key.h  15.75 KB
keyboard.h  665 B
keyctl.h  1.25 KB
keyslot-manager.h  3.78 KB
kfence.h  8 KB
kfifo.h  26.18 KB
kgdb.h  12.05 KB
khugepaged.h  2.74 KB
klist.h  1.88 KB
kmemleak.h  3.27 KB
kmod.h  1.02 KB
kmsg_dump.h  2.71 KB
kobj_map.h  545 B
kobject.h  7.69 KB
kobject_ns.h  1.9 KB
kprobes.h  13.95 KB
kref.h  3.08 KB
ks0108.h  970 B
ks8842.h  632 B
ks8851_mll.h  472 B
ksm.h  2.47 KB
kstrtox.h  6.59 KB
kthread.h  6.9 KB
ktime.h  5.38 KB
kvm_dirty_ring.h  2.73 KB
kvm_host.h  58.09 KB
kvm_irqfd.h  2 KB
kvm_para.h  402 B
kvm_types.h  2.17 KB
l2tp.h  261 B
lantiq.h  365 B
lapb.h  1.71 KB
latencytop.h  1.16 KB
lcd.h  3.78 KB
lcm.h  275 B
led-class-flash.h  6.74 KB
led-class-multicolor.h  3.26 KB
led-lm3530.h  3.7 KB
leds-bd2802.h  476 B
leds-lp3944.h  950 B
leds-lp3952.h  2.35 KB
leds-pca9532.h  866 B
leds-regulator.h  1.14 KB
leds-ti-lmu-common.h  1.12 KB
leds.h  18.26 KB
libata.h  66.97 KB
libfdt.h  202 B
libfdt_env.h  491 B
libgcc.h  463 B
libnvdimm.h  10.38 KB
libps2.h  1.89 KB
license.h  418 B
limits.h  715 B
linear_range.h  1.72 KB
linkage.h  9.57 KB
linkmode.h  2.5 KB
linux_logo.h  1.91 KB
lis3lv02d.h  5 KB
list.h  29.78 KB
list_bl.h  4.79 KB
list_lru.h  7.29 KB
list_nulls.h  4.22 KB
list_sort.h  374 B
litex.h  2.06 KB
livepatch.h  8.22 KB
llc.h  749 B
llist.h  9.06 KB
local_lock.h  1.33 KB
local_lock_internal.h  3.42 KB
lockdep.h  21.34 KB
lockdep_types.h  5.18 KB
lockref.h  1.5 KB
log2.h  6.22 KB
logic_iomem.h  2.07 KB
logic_pio.h  3.23 KB
lp.h  2.76 KB
lru_cache.h  11.83 KB
lsm_audit.h  2.81 KB
lsm_hook_defs.h  20.89 KB
lsm_hooks.h  74.39 KB
lz4.h  26.44 KB
lzo.h  1.55 KB
mISDNdsp.h  1.19 KB
mISDNhw.h  5.39 KB
mISDNif.h  14.89 KB
mailbox_client.h  1.71 KB
mailbox_controller.h  5.67 KB
maple.h  2.71 KB
marvell_phy.h  1.69 KB
math.h  5.06 KB
math64.h  7.48 KB
mbcache.h  1.57 KB
mbus.h  3.09 KB
mc146818rtc.h  4.46 KB
mc6821.h  1.18 KB
mcb.h  3.72 KB
mdev.h  5.94 KB
mdio-bitbang.h  1.31 KB
mdio-gpio.h  177 B
mdio-mux.h  1021 B
mdio.h  12.42 KB
mei_cl_bus.h  3.75 KB
mem_encrypt.h  885 B
memblock.h  20.08 KB
memcontrol.h  43.18 KB
memfd.h  365 B
memory.h  5.99 KB
memory_hotplug.h  11.42 KB
mempolicy.h  7.37 KB
mempool.h  3.35 KB
memregion.h  403 B
memremap.h  5.97 KB
memstick.h  9.6 KB
mhi.h  26.04 KB
micrel_phy.h  1.85 KB
microchipphy.h  2.65 KB
migrate.h  5.41 KB
migrate_mode.h  758 B
mii.h  16.33 KB
mii_timestamper.h  3.57 KB
min_heap.h  3.3 KB
minmax.h  4.02 KB
misc_cgroup.h  2.95 KB
miscdevice.h  3.2 KB
mm.h  104.32 KB
mm_inline.h  2.92 KB
mm_types.h  25.06 KB
mm_types_task.h  2.5 KB
mman.h  3.91 KB
mmap_lock.h  4.25 KB
mmdebug.h  2.25 KB
mmiotrace.h  3.05 KB
mmu_context.h  855 B
mmu_notifier.h  24.64 KB
mmzone.h  46.64 KB
mnt_namespace.h  697 B
mod_devicetable.h  23.42 KB
module.h  23.58 KB
module_signature.h  1.22 KB
moduleloader.h  3.1 KB
moduleparam.h  22.34 KB
most.h  12.2 KB
mount.h  3.93 KB
moxtet.h  2.36 KB
mpage.h  737 B
mpi.h  8.41 KB
mpls.h  394 B
mpls_iptunnel.h  178 B
mroute.h  1.97 KB
mroute6.h  2.41 KB
mroute_base.h  12.17 KB
msdos_fs.h  273 B
msdos_partition.h  1.62 KB
msg.h  395 B
msi.h  15.36 KB
mtio.h  1.29 KB
mutex.h  6.65 KB
mv643xx.h  51.05 KB
mv643xx_eth.h  1.95 KB
mv643xx_i2c.h  335 B
mvebu-pmsu.h  520 B
mxm-wmi.h  399 B
namei.h  3.85 KB
nd.h  5.55 KB
ndctl.h  674 B
net.h  11.43 KB
netdev_features.h  10.75 KB
netdevice.h  167.19 KB
netfilter.h  13.47 KB
netfilter_bridge.h  2.08 KB
netfilter_defs.h  486 B
netfilter_ingress.h  1.44 KB
netfilter_ipv4.h  1.05 KB
netfilter_ipv6.h  5.85 KB
netfs.h  7.93 KB
netlink.h  7.88 KB
netpoll.h  2.4 KB
nfs.h  1.31 KB
nfs3.h  260 B
nfs4.h  18.95 KB
nfs_fs.h  17.83 KB
nfs_fs_i.h  308 B
nfs_fs_sb.h  10.05 KB
nfs_iostat.h  4.18 KB
nfs_page.h  6.75 KB
nfs_ssc.h  2.01 KB
nfs_xdr.h  42.12 KB
nfsacl.h  1.4 KB
nitro_enclaves.h  267 B
nl802154.h  3.85 KB
nls.h  3.08 KB
nmi.h  6.72 KB
node.h  4.67 KB
nodemask.h  16.99 KB
nospec.h  2.16 KB
notifier.h  7.84 KB
ns_common.h  283 B
nsc_gpio.h  1.42 KB
nsproxy.h  3.09 KB
ntb.h  52.43 KB
ntb_transport.h  3.8 KB
nubus.h  5.54 KB
numa.h  1.25 KB
nvme-fc-driver.h  45.86 KB
nvme-fc.h  9.72 KB
nvme-rdma.h  2.04 KB
nvme-tcp.h  4.4 KB
nvme.h  37.72 KB
nvmem-consumer.h  6.78 KB
nvmem-provider.h  4.72 KB
nvram.h  3.47 KB
objagg.h  1.99 KB
objtool.h  4.35 KB
of.h  43.24 KB
of_address.h  4.17 KB
of_clk.h  819 B
of_device.h  2.93 KB
of_dma.h  2.3 KB
of_fdt.h  3.65 KB
of_gpio.h  4.07 KB
of_graph.h  3.55 KB
of_iommu.h  505 B
of_irq.h  3.75 KB
of_mdio.h  3.51 KB
of_net.h  824 B
of_pci.h  915 B
of_pdt.h  1.14 KB
of_platform.h  3.88 KB
of_reserved_mem.h  2.24 KB
oid_registry.h  5.63 KB
olpc-ec.h  1.95 KB
omap-dma.h  9.79 KB
omap-gpmc.h  2.64 KB
omap-iommu.h  880 B
omap-mailbox.h  689 B
omapfb.h  576 B
once.h  1.87 KB
once_lite.h  677 B
oom.h  3.26 KB
openvswitch.h  403 B
osq_lock.h  1.04 KB
overflow.h  6.42 KB
packing.h  1.76 KB
padata.h  6.12 KB
page-flags-layout.h  3.21 KB
page-flags.h  28 KB
page-isolation.h  1.64 KB
page_counter.h  2.3 KB
page_ext.h  1.83 KB
page_idle.h  2.61 KB
page_owner.h  2.31 KB
page_ref.h  4.95 KB
page_reporting.h  895 B
pageblock-flags.h  2.31 KB
pagemap.h  31.48 KB
pagevec.h  2.04 KB
pagewalk.h  4.07 KB
panic.h  2.69 KB
panic_notifier.h  296 B
parman.h  2.87 KB
parport.h  19.09 KB
parport_pc.h  6.56 KB
parser.h  1.17 KB
part_stat.h  2.38 KB
pata_arasan_cf_data.h  1.22 KB
patchkey.h  757 B
path.h  572 B
pch_dma.h  408 B
pci-acpi.h  3.85 KB
pci-ats.h  1.76 KB
pci-dma-compat.h  3.66 KB
pci-ecam.h  3.2 KB
pci-ep-cfs.h  956 B
pci-epc.h  9.47 KB
pci-epf.h  7.06 KB
pci-p2pdma.h  3.95 KB
pci.h  89.14 KB
pci_hotplug.h  4.35 KB
pci_ids.h  120.69 KB
pcs-lynx.h  425 B
pda_power.h  1005 B
pe.h  16.17 KB
percpu-defs.h  18.12 KB
percpu-refcount.h  10.84 KB
percpu-rwsem.h  4.16 KB
percpu.h  4.65 KB
percpu_counter.h  4.39 KB
perf_event.h  46.38 KB
perf_regs.h  1.05 KB
personality.h  393 B
pfn.h  666 B
pfn_t.h  3.21 KB
pgtable.h  43.89 KB
phonet.h  537 B
phy.h  57.75 KB
phy_fixed.h  1.77 KB
phy_led_triggers.h  1.01 KB
phylink.h  19.37 KB
pid.h  6.29 KB
pid_namespace.h  2.08 KB
pim.h  2.67 KB
pipe_fs_i.h  8.68 KB
pkeys.h  990 B
pktcdvd.h  5.87 KB
pl320-ipc.h  209 B
platform_device.h  12.21 KB
platform_profile.h  1.12 KB
pldmfw.h  4.76 KB
plist.h  8.66 KB
pm-trace.h  940 B
pm.h  32.64 KB
pm_clock.h  2.58 KB
pm_domain.h  13.18 KB
pm_opp.h  15.1 KB
pm_qos.h  10.41 KB
pm_runtime.h  18.5 KB
pm_wakeirq.h  1.31 KB
pm_wakeup.h  6.28 KB
pmbus.h  2.48 KB
pmu.h  2.44 KB
pnfs_osd_xdr.h  9.27 KB
pnp.h  14.9 KB
poison.h  2.48 KB
poll.h  4.07 KB
posix-clock.h  3.91 KB
posix-timers.h  7.01 KB
posix_acl.h  3.27 KB
posix_acl_xattr.h  1.75 KB
power_supply.h  17.74 KB
powercap.h  11.98 KB
ppp-comp.h  2.95 KB
ppp_channel.h  2.99 KB
ppp_defs.h  305 B
pps_kernel.h  2.9 KB
pr.h  566 B
prandom.h  3.75 KB
preempt.h  11.99 KB
prefetch.h  1.7 KB
prime_numbers.h  1.35 KB
printk.h  22.19 KB
prmt.h  143 B
proc_fs.h  8.35 KB
proc_ns.h  2.57 KB
processor.h  1.84 KB
profile.h  2.65 KB
projid.h  2.22 KB
property.h  17.28 KB
pruss_driver.h  1.27 KB
psci.h  1.52 KB
pseudo_fs.h  355 B
psi.h  1.56 KB
psi_types.h  4.31 KB
psp-sev.h  17.76 KB
psp-tee.h  2.65 KB
pstore.h  7.63 KB
pstore_blk.h  1.53 KB
pstore_ram.h  3.82 KB
pstore_zone.h  2.28 KB
ptdump.h  567 B
pti.h  240 B
ptp_classify.h  4.35 KB
ptp_clock_kernel.h  12.2 KB
ptp_kvm.h  430 B
ptp_pch.h  623 B
ptr_ring.h  16.29 KB
ptrace.h  14.92 KB
purgatory.h  590 B
pvclock_gtod.h  548 B
pwm.h  16.37 KB
pwm_backlight.h  722 B
pxa168_eth.h  728 B
pxa2xx_ssp.h  11.13 KB
qcom-geni-se.h  13.74 KB
qcom_scm.h  3.81 KB
qnx6_fs.h  3.27 KB
quota.h  18.67 KB
quotaops.h  10.28 KB
radix-tree.h  15.63 KB
raid_class.h  2.1 KB
ramfs.h  695 B
random.h  4.12 KB
randomize_kstack.h  1.92 KB
range.h  775 B
ras.h  1.02 KB
ratelimit.h  1.72 KB
ratelimit_types.h  1.14 KB
rational.h  639 B
rbtree.h  8.93 KB
rbtree_augmented.h  9.53 KB
rbtree_latch.h  6.66 KB
rbtree_types.h  946 B
rcu_node_tree.h  3.73 KB
rcu_segcblist.h  10.03 KB
rcu_sync.h  1.46 KB
rculist.h  28.56 KB
rculist_bl.h  3.29 KB
rculist_nulls.h  6.57 KB
rcupdate.h  36.33 KB
rcupdate_trace.h  3.08 KB
rcupdate_wait.h  1.73 KB
rcutiny.h  3.32 KB
rcutree.h  2.34 KB
rcuwait.h  1.84 KB
reboot-mode.h  600 B
reboot.h  2.18 KB
reciprocal_div.h  3.28 KB
refcount.h  12.02 KB
regmap.h  62.95 KB
regset.h  11.35 KB
relay.h  8.47 KB
remoteproc.h  24.77 KB
resctrl.h  6.5 KB
reset-controller.h  3.57 KB
reset.h  30.24 KB
resource.h  339 B
resource_ext.h  2.08 KB
restart_block.h  1.11 KB
rfkill.h  10.52 KB
rhashtable-types.h  3.45 KB
rhashtable.h  37.59 KB
ring_buffer.h  7.43 KB
rio.h  19.02 KB
rio_drv.h  14.32 KB
rio_ids.h  1.08 KB
rio_regs.h  19.07 KB
rmap.h  8.96 KB
rmi.h  12.04 KB
rndis.h  16.86 KB
rodata_test.h  394 B
root_dev.h  619 B
rpmsg.h  8.4 KB
rslib.h  3.67 KB
rtc.h  8.65 KB
rtmutex.h  2.97 KB
rtnetlink.h  4.46 KB
rtsx_common.h  890 B
rtsx_pci.h  39.82 KB
rtsx_usb.h  15.42 KB
rwbase_rt.h  876 B
rwlock.h  4.55 KB
rwlock_api_smp.h  7.65 KB
rwlock_rt.h  2.98 KB
rwlock_types.h  1.77 KB
rwsem.h  7.32 KB
s3c_adc_battery.h  923 B
sbitmap.h  16.9 KB
scatterlist.h  17.56 KB
scc.h  2.84 KB
sched.h  64.44 KB
sched_clock.h  1.44 KB
scmi_protocol.h  26.03 KB
scpi_protocol.h  2.67 KB
screen_info.h  191 B
scs.h  1.83 KB
sctp.h  22.88 KB
scx200.h  1.82 KB
scx200_gpio.h  2.38 KB
sdb.h  4.17 KB
seccomp.h  3.45 KB
secretmem.h  1.17 KB
securebits.h  239 B
security.h  58.64 KB
sed-opal.h  1.66 KB
seg6.h  121 B
seg6_genl.h  136 B
seg6_hmac.h  136 B
seg6_iptunnel.h  148 B
seg6_local.h  100 B
selection.h  1.81 KB
sem.h  599 B
semaphore.h  1.35 KB
seq_buf.h  3.94 KB
seq_file.h  8.75 KB
seq_file_net.h  730 B
seqlock.h  38.18 KB
seqno-fence.h  3.57 KB
serdev.h  9.82 KB
serial.h  630 B
serial_8250.h  6.57 KB
serial_bcm63xx.h  4.73 KB
serial_core.h  17.65 KB
serial_max3100.h  1.19 KB
serial_s3c.h  9.42 KB
serial_sci.h  1.6 KB
serio.h  4.28 KB
set_memory.h  1.66 KB
sfp.h  15.87 KB
sh_clk.h  5.96 KB
sh_dma.h  3.47 KB
sh_eth.h  369 B
sh_intc.h  3.42 KB
sh_timer.h  172 B
shdma-base.h  4.27 KB
shm.h  968 B
shmem_fs.h  5.81 KB
shrinker.h  3.25 KB
signal.h  13.56 KB
signal_types.h  1.68 KB
signalfd.h  817 B
siox.h  2.26 KB
siphash.h  4.53 KB
sizes.h  1.27 KB
skb_array.h  5.18 KB
skbuff.h  133.77 KB
skmsg.h  13.29 KB
slab.h  22.32 KB
slab_def.h  3.02 KB
slimbus.h  6.93 KB
slub_def.h  6.22 KB
sm501-regs.h  11.62 KB
sm501.h  4.02 KB
smc911x.h  294 B
smc91x.h  1.57 KB
smp.h  7.63 KB
smp_types.h  1.35 KB
smpboot.h  1.68 KB
smsc911x.h  1.63 KB
smscphy.h  1.25 KB
sock_diag.h  2.31 KB
socket.h  15.04 KB
sockptr.h  2.23 KB
sonet.h  469 B
sony-laptop.h  1.41 KB
sonypi.h  1.71 KB
sort.h  346 B
sound.h  685 B
soundcard.h  1.59 KB
spinlock.h  14.07 KB
spinlock_api_smp.h  5.47 KB
spinlock_api_up.h  3.31 KB
spinlock_rt.h  4.11 KB
spinlock_types.h  1.78 KB
spinlock_types_raw.h  1.69 KB
spinlock_types_up.h  726 B
spinlock_up.h  2.16 KB
splice.h  3.24 KB
spmi.h  5.59 KB
sram.h  844 B
srcu.h  6.93 KB
srcutiny.h  2.75 KB
srcutree.h  4.8 KB
ssbi.h  726 B
stackdepot.h  768 B
stackleak.h  902 B
stackprotector.h  361 B
stacktrace.h  3.89 KB
start_kernel.h  415 B
stat.h  1.33 KB
statfs.h  1.78 KB
static_call.h  10.17 KB
static_call_types.h  2.74 KB
static_key.h  30 B
stdarg.h  313 B
stddef.h  827 B
stm.h  4.34 KB
stmmac.h  6.75 KB
stmp3xxx_rtc_wdt.h  331 B
stmp_device.h  412 B
stop_machine.h  4.41 KB
string.h  8.66 KB
string_helpers.h  2.69 KB
stringhash.h  2.65 KB
stringify.h  341 B
sungem_phy.h  3.94 KB
sunserialcore.h  1.08 KB
sunxi-rsb.h  2.89 KB
superhyway.h  2.81 KB
surface_acpi_notify.h  1.07 KB
suspend.h  20.02 KB
svga.h  3.75 KB
sw842.h  328 B
swab.h  569 B
swait.h  9.45 KB
swap.h  23.51 KB
swap_cgroup.h  971 B
swap_slots.h  841 B
swapfile.h  556 B
swapops.h  9.61 KB
swiotlb.h  5.33 KB
switchtec.h  10.9 KB
sxgbe_platform.h  1.23 KB
sync_core.h  581 B
sync_file.h  1.57 KB
synclink.h  989 B
sys.h  960 B
sys_soc.h  1.26 KB
syscall_user_dispatch.h  930 B
syscalls.h  55.31 KB
syscore_ops.h  633 B
sysctl.h  7.73 KB
sysfb.h  2.39 KB
sysfs.h  17.64 KB
syslog.h  1.24 KB
sysrq.h  1.96 KB
sysv_fs.h  9.03 KB
t10-pi.h  1.5 KB
task_io_accounting.h  1.13 KB
task_io_accounting_ops.h  2.55 KB
task_work.h  854 B
taskstats_kern.h  957 B
tboot.h  3.33 KB
tc.h  3.45 KB
tca6416_keypad.h  701 B
tcp.h  17.13 KB
tee_drv.h  18.35 KB
textsearch.h  4.73 KB
textsearch_fsm.h  1.19 KB
tfrc.h  1.68 KB
thermal.h  15.41 KB
thread_info.h  6.51 KB
threads.h  1.28 KB
thunderbolt.h  20.78 KB
ti-emif-sram.h  5.15 KB
ti_wilink_st.h  13.46 KB
tick.h  9.74 KB
tifm.h  4.66 KB
timb_dma.h  1.17 KB
timb_gpio.h  717 B
time.h  3.14 KB
time32.h  1.75 KB
time64.h  4.49 KB
time_namespace.h  3.82 KB
timecounter.h  4.14 KB
timekeeper_internal.h  5.33 KB
timekeeping.h  7.61 KB
timer.h  7.35 KB
timerfd.h  508 B
timeriomem-rng.h  432 B
timerqueue.h  1.39 KB
timex.h  6.41 KB
tnum.h  3.39 KB
topology.h  5.25 KB
torture.h  4.2 KB
toshiba.h  460 B
tpm.h  10.98 KB
tpm_command.h  847 B
tpm_eventlog.h  6.54 KB
trace.h  1.65 KB
trace_clock.h  667 B
trace_events.h  27.51 KB
trace_recursion.h  5.63 KB
trace_seq.h  3.92 KB
tracefs.h  1018 B
tracehook.h  7.97 KB
tracepoint-defs.h  2.34 KB
tracepoint.h  18.16 KB
transport_class.h  2.51 KB
ts-nbus.h  532 B
tsacct_kern.h  1.2 KB
tty.h  14.52 KB
tty_buffer.h  1.28 KB
tty_driver.h  15.69 KB
tty_flip.h  1.74 KB
tty_ldisc.h  8.46 KB
tty_port.h  7.24 KB
typecheck.h  782 B
types.h  5.64 KB
u64_stats_sync.h  5.69 KB
uacce.h  3.74 KB
uaccess.h  12.93 KB
ucb1400.h  4.12 KB
ucs2_string.h  662 B
udp.h  4.53 KB
uidgid.h  4.07 KB
uio.h  9.01 KB
uio_driver.h  4.71 KB
umh.h  1.79 KB
unicode.h  1.02 KB
units.h  2.59 KB
uprobes.h  6 KB
usb.h  77.55 KB
usb_usual.h  3.58 KB
usbdevice_fs.h  2.18 KB
user-return-notifier.h  1.18 KB
user.h  22 B
user_namespace.h  6.03 KB
userfaultfd_k.h  6.44 KB
usermode_driver.h  515 B
util_macros.h  1.17 KB
uts.h  388 B
utsname.h  1.78 KB
uuid.h  2.34 KB
vbox_utils.h  1.75 KB
vdpa.h  14.02 KB
verification.h  1.77 KB
vermagic.h  1.28 KB
vexpress.h  297 B
vfio.h  8.21 KB
vfio_pci_core.h  7.46 KB
vfs.h  116 B
vga_switcheroo.h  8.62 KB
vgaarb.h  3.88 KB
vhost_iotlb.h  1.34 KB
via-core.h  6.57 KB
via-gpio.h  310 B
via.h  932 B
via_i2c.h  844 B
videodev2.h  2.7 KB
virtio.h  6.7 KB
virtio_byteorder.h  1.46 KB
virtio_caif.h  513 B
virtio_config.h  16.26 KB
virtio_console.h  1.93 KB
virtio_dma_buf.h  1.12 KB
virtio_net.h  5.38 KB
virtio_pci_modern.h  3.25 KB
virtio_ring.h  2.95 KB
virtio_vsock.h  4.79 KB
visorbus.h  12.23 KB
vlynq.h  3.22 KB
vm_event_item.h  3.46 KB
vmacache.h  722 B
vmalloc.h  8.61 KB
vme.h  5.66 KB
vmpressure.h  1.68 KB
vmstat.h  14.76 KB
vmw_vmci_api.h  2.88 KB
vmw_vmci_defs.h  27.5 KB
vringh.h  8.62 KB
vt.h  611 B
vt_buffer.h  1.49 KB
vt_kern.h  5.31 KB
vtime.h  4.6 KB
w1-gpio.h  493 B
w1.h  8.93 KB
wait.h  41.93 KB
wait_bit.h  11.2 KB
watch_queue.h  3.82 KB
watchdog.h  8.21 KB
win_minmax.h  832 B
wireless.h  1.4 KB
wkup_m3_ipc.h  1.75 KB
wl12xx.h  810 B
wm97xx.h  10.59 KB
wmi.h  1.56 KB
workqueue.h  21.38 KB
writeback.h  12.83 KB
ww_mutex.h  13.11 KB
wwan.h  5.13 KB
xarray.h  56.17 KB
xattr.h  3.97 KB
xxhash.h  8.27 KB
xz.h  11.16 KB
yam.h  2.18 KB
z2_battery.h  299 B
zconf.h  1.73 KB
zlib.h  28.11 KB
zorro.h  3.57 KB
zpool.h  3.24 KB
zsmalloc.h  1.63 KB
zstd.h  48.58 KB
zutil.h  2.73 KB
Code Editor: mm.h
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MM_H #define _LINUX_MM_H #include <linux/errno.h> #ifdef __KERNEL__ #include <linux/mmdebug.h> #include <linux/gfp.h> #include <linux/bug.h> #include <linux/list.h> #include <linux/mmzone.h> #include <linux/rbtree.h> #include <linux/atomic.h> #include <linux/debug_locks.h> #include <linux/mm_types.h> #include <linux/mmap_lock.h> #include <linux/range.h> #include <linux/pfn.h> #include <linux/percpu-refcount.h> #include <linux/bit_spinlock.h> #include <linux/shrinker.h> #include <linux/resource.h> #include <linux/page_ext.h> #include <linux/err.h> #include <linux/page-flags.h> #include <linux/page_ref.h> #include <linux/memremap.h> #include <linux/overflow.h> #include <linux/sizes.h> #include <linux/sched.h> #include <linux/pgtable.h> #include <linux/kasan.h> struct mempolicy; struct anon_vma; struct anon_vma_chain; struct file_ra_state; struct user_struct; struct writeback_control; struct bdi_writeback; struct pt_regs; extern int sysctl_page_lock_unfairness; void init_mm_internals(void); #ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ extern unsigned long max_mapnr; static inline void set_max_mapnr(unsigned long limit) { max_mapnr = limit; } #else static inline void set_max_mapnr(unsigned long limit) { } #endif extern atomic_long_t _totalram_pages; static inline unsigned long totalram_pages(void) { return (unsigned long)atomic_long_read(&_totalram_pages); } static inline void totalram_pages_inc(void) { atomic_long_inc(&_totalram_pages); } static inline void totalram_pages_dec(void) { atomic_long_dec(&_totalram_pages); } static inline void totalram_pages_add(long count) { atomic_long_add(count, &_totalram_pages); } extern void * high_memory; extern int page_cluster; #ifdef CONFIG_SYSCTL extern int sysctl_legacy_va_layout; #else #define sysctl_legacy_va_layout 0 #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS extern const int mmap_rnd_bits_min; extern const int mmap_rnd_bits_max; extern int mmap_rnd_bits __read_mostly; #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS extern const int mmap_rnd_compat_bits_min; extern const int mmap_rnd_compat_bits_max; extern int mmap_rnd_compat_bits __read_mostly; #endif #include <asm/page.h> #include <asm/processor.h> /* * Architectures that support memory tagging (assigning tags to memory regions, * embedding these tags into addresses that point to these memory regions, and * checking that the memory and the pointer tags match on memory accesses) * redefine this macro to strip tags from pointers. * It's defined as noop for architectures that don't support memory tagging. */ #ifndef untagged_addr #define untagged_addr(addr) (addr) #endif #ifndef __pa_symbol #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif #ifndef page_to_virt #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) #endif #ifndef lm_alias #define lm_alias(x) __va(__pa_symbol(x)) #endif /* * To prevent common memory management code establishing * a zero page mapping on a read fault. * This macro should be defined within <asm/pgtable.h>. * s390 does this to prevent multiplexing of hardware bits * related to the physical page in case of virtualization. */ #ifndef mm_forbids_zeropage #define mm_forbids_zeropage(X) (0) #endif /* * On some architectures it is expensive to call memset() for small sizes. 
* If an architecture decides to implement their own version of * mm_zero_struct_page they should wrap the defines below in a #ifndef and * define their own version of this macro in <asm/pgtable.h> */ #if BITS_PER_LONG == 64 /* This function must be updated when the size of struct page grows above 80 * or reduces below 56. The idea that compiler optimizes out switch() * statement, and only leaves move/store instructions. Also the compiler can * combine write statements if they are both assignments and can be reordered, * this can result in several of the writes here being dropped. */ #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp) static inline void __mm_zero_struct_page(struct page *page) { unsigned long *_pp = (void *)page; /* Check that struct page is either 56, 64, 72, or 80 bytes */ BUILD_BUG_ON(sizeof(struct page) & 7); BUILD_BUG_ON(sizeof(struct page) < 56); BUILD_BUG_ON(sizeof(struct page) > 80); switch (sizeof(struct page)) { case 80: _pp[9] = 0; fallthrough; case 72: _pp[8] = 0; fallthrough; case 64: _pp[7] = 0; fallthrough; case 56: _pp[6] = 0; _pp[5] = 0; _pp[4] = 0; _pp[3] = 0; _pp[2] = 0; _pp[1] = 0; _pp[0] = 0; } } #else #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page))) #endif /* * Default maximum number of active map areas, this limits the number of vmas * per mm struct. Users can overwrite this number by sysctl but there is a * problem. * * When a program's coredump is generated as ELF format, a section is created * per a vma. In ELF, the number of sections is represented in unsigned short. * This means the number of sections should be smaller than 65535 at coredump. * Because the kernel adds some informative sections to a image of program at * generating coredump, we need some margin. The number of extra sections is * 1-3 now and depends on arch. We use "5" as safe margin, here. * * ELF extended numbering allows more than 65535 sections, so 16-bit bound is * not a hard limit any more. Although some userspace tools can be surprised by * that. */ #define MAPCOUNT_ELF_CORE_MARGIN (5) #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) extern int sysctl_max_map_count; extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern unsigned long sysctl_overcommit_kbytes; int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, loff_t *); /* * Any attempt to mark this function as static leads to build failure * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked() * is referred to by BPF code. This must be visible for error injection. 
*/ int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp, void **shadowp); #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) #else #define nth_page(page,n) ((page) + (n)) #endif /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) void setup_initial_init_mm(void *start_code, void *end_code, void *end_data, void *brk); /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way * we have a virtual fs - giving a cleaner interface to the * mm details, and allowing different kinds of memory mappings * (from shared memory to executable loading to arbitrary * mmap() functions). */ struct vm_area_struct *vm_area_alloc(struct mm_struct *); struct vm_area_struct *vm_area_dup(struct vm_area_struct *); void vm_area_free(struct vm_area_struct *); #ifndef CONFIG_MMU extern struct rb_root nommu_region_tree; extern struct rw_semaphore nommu_region_sem; extern unsigned int kobjsize(const void *objp); #endif /* * vm_flags in vm_area_struct, see mm_types.h. * When changing, update also include/trace/events/mmflags.h */ #define VM_NONE 0x00000000 #define VM_READ 0x00000001 /* currently active flags */ #define VM_WRITE 0x00000002 #define VM_EXEC 0x00000004 #define VM_SHARED 0x00000008 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */ #define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ #define VM_MAYWRITE 0x00000020 #define VM_MAYEXEC 0x00000040 #define VM_MAYSHARE 0x00000080 #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ #define VM_LOCKED 0x00002000 #define VM_IO 0x00004000 /* Memory mapped I/O or similar */ /* Used by sys_madvise() */ #define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ #define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_SYNC 0x00800000 /* Synchronous page faults */ #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ #define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. 
*/ #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ #ifdef CONFIG_MEM_SOFT_DIRTY # define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */ #else # define VM_SOFTDIRTY 0 #endif #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS #define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ #ifdef CONFIG_ARCH_HAS_PKEYS # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 # define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ # define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */ # define VM_PKEY_BIT2 VM_HIGH_ARCH_2 # define VM_PKEY_BIT3 VM_HIGH_ARCH_3 #ifdef CONFIG_PPC # define VM_PKEY_BIT4 VM_HIGH_ARCH_4 #else # define VM_PKEY_BIT4 0 #endif #endif /* CONFIG_ARCH_HAS_PKEYS */ #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ #elif defined(CONFIG_PPC) # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) # define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_SPARC64) # define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ # define VM_ARCH_CLEAR VM_SPARC_ADI #elif defined(CONFIG_ARM64) # define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */ # define VM_ARCH_CLEAR VM_ARM64_BTI #elif !defined(CONFIG_MMU) # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ #endif #if defined(CONFIG_ARM64_MTE) # define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */ # define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */ #else # define VM_MTE VM_NONE # define VM_MTE_ALLOWED VM_NONE #endif #ifndef VM_GROWSUP # define VM_GROWSUP VM_NONE #endif #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR # define VM_UFFD_MINOR_BIT 37 # define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ # define VM_UFFD_MINOR VM_NONE #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) /* Common data flag combinations */ #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ VM_MAYWRITE | VM_MAYEXEC) #define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC #endif #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS #endif #ifdef CONFIG_STACK_GROWSUP #define VM_STACK VM_GROWSUP #else #define VM_STACK VM_GROWSDOWN #endif #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) /* VMA basic access permission flags */ #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) /* * Special vmas that are non-mergable, non-mlock()able. */ #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) /* This mask prevents VMA from being scanned with khugepaged */ #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) /* This mask defines which mm->def_flags a process can inherit its parent */ #define VM_INIT_DEF_MASK VM_NOHUGEPAGE /* This mask is used to clear all the VMA flags used by mlock */ #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) /* Arch-specific flags to clear when updating VM flags on protection change */ #ifndef VM_ARCH_CLEAR # define VM_ARCH_CLEAR VM_NONE #endif #define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) /* * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. */ extern pgprot_t protection_map[16]; /** * enum fault_flag - Fault flag definitions. * @FAULT_FLAG_WRITE: Fault was a write fault. * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying. * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region. * @FAULT_FLAG_TRIED: The fault has been tried once. * @FAULT_FLAG_USER: The fault originated in userspace. * @FAULT_FLAG_REMOTE: The fault is not for current task/mm. * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch. * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals. * * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify * whether we would allow page faults to retry by specifying these two * fault flags correctly. Currently there can be three legal combinations: * * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and * this is the first try * * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and * we've already tried at least once * * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry * * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never * be used. Note that page faults can be allowed to retry for multiple times, * in which case we'll have an initial fault with flags (a) then later on * continuous faults with flags (b). We should always try to detect pending * signals before a retry to make sure the continuous page faults can still be * interrupted if necessary. 
*/ enum fault_flag { FAULT_FLAG_WRITE = 1 << 0, FAULT_FLAG_MKWRITE = 1 << 1, FAULT_FLAG_ALLOW_RETRY = 1 << 2, FAULT_FLAG_RETRY_NOWAIT = 1 << 3, FAULT_FLAG_KILLABLE = 1 << 4, FAULT_FLAG_TRIED = 1 << 5, FAULT_FLAG_USER = 1 << 6, FAULT_FLAG_REMOTE = 1 << 7, FAULT_FLAG_INSTRUCTION = 1 << 8, FAULT_FLAG_INTERRUPTIBLE = 1 << 9, }; /* * The default fault flags that should be used by most of the * arch-specific page fault handlers. */ #define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \ FAULT_FLAG_KILLABLE | \ FAULT_FLAG_INTERRUPTIBLE) /** * fault_flag_allow_retry_first - check ALLOW_RETRY the first time * @flags: Fault flags. * * This is mostly used for places where we want to try to avoid taking * the mmap_lock for too long a time when waiting for another condition * to change, in which case we can try to be polite to release the * mmap_lock in the first round to avoid potential starvation of other * processes that would also want the mmap_lock. * * Return: true if the page fault allows retry and this is the first * attempt of the fault handling; false otherwise. */ static inline bool fault_flag_allow_retry_first(enum fault_flag flags) { return (flags & FAULT_FLAG_ALLOW_RETRY) && (!(flags & FAULT_FLAG_TRIED)); } #define FAULT_FLAG_TRACE \ { FAULT_FLAG_WRITE, "WRITE" }, \ { FAULT_FLAG_MKWRITE, "MKWRITE" }, \ { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \ { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \ { FAULT_FLAG_KILLABLE, "KILLABLE" }, \ { FAULT_FLAG_TRIED, "TRIED" }, \ { FAULT_FLAG_USER, "USER" }, \ { FAULT_FLAG_REMOTE, "REMOTE" }, \ { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } /* * vm_fault is filled by the pagefault handler and passed to the vma's * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * * MM layer fills up gfp_mask for page allocations but fault handler might * alter it if its implementation requires a different allocation context. * * pgoff should be used in favour of virtual_address, if possible. */ struct vm_fault { const struct { struct vm_area_struct *vma; /* Target VMA */ gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ unsigned long address; /* Faulting virtual address */ }; enum fault_flag flags; /* FAULT_FLAG_xxx flags * XXX: should really be 'const' */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ pud_t *pud; /* Pointer to pud entry matching * the 'address' */ union { pte_t orig_pte; /* Value of PTE at the time of fault */ pmd_t orig_pmd; /* Value of PMD at the time of fault, * used by PMD fault only. */ }; struct page *cow_page; /* Page handler may use for COW fault */ struct page *page; /* ->fault handlers should return a * page here, unless VM_FAULT_NOPAGE * is set (which is also implied by * VM_FAULT_ERROR). */ /* These three entries are valid only while holding ptl lock */ pte_t *pte; /* Pointer to pte entry matching * the 'address'. NULL if the page * table hasn't been allocated. */ spinlock_t *ptl; /* Page table lock. * Protects pte page table if 'pte' * is not NULL, otherwise pmd. */ pgtable_t prealloc_pte; /* Pre-allocated pte page table. * vm_ops->map_pages() sets up a page * table from atomic context. * do_fault_around() pre-allocates * page table to avoid allocation from * atomic context. 
 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr. This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
*/ struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); }; static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) { static const struct vm_operations_struct dummy_vm_ops = {}; memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; vma->vm_ops = &dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); } static inline void vma_set_anonymous(struct vm_area_struct *vma) { vma->vm_ops = NULL; } static inline bool vma_is_anonymous(struct vm_area_struct *vma) { return !vma->vm_ops; } static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) { int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); if (!maybe_stack) return false; if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == VM_STACK_INCOMPLETE_SETUP) return true; return false; } static inline bool vma_is_foreign(struct vm_area_struct *vma) { if (!current->mm) return true; if (current->mm != vma->vm_mm) return true; return false; } static inline bool vma_is_accessible(struct vm_area_struct *vma) { return vma->vm_flags & VM_ACCESS_FLAGS; } #ifdef CONFIG_SHMEM /* * The vma_is_shmem is not inline because it is used only by slow * paths in userfault. */ bool vma_is_shmem(struct vm_area_struct *vma); #else static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } #endif int vma_is_stack_for_current(struct vm_area_struct *vma); /* flush_tlb_range() takes a vma, not a mm, and can care about flags */ #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } struct mmu_gather; struct inode; #include <linux/huge_mm.h> /* * Methods to modify the page usage count. * * What counts for a page usage: * - cache mapping (page->mapping) * - private data (page->private) * - page mapped in a task's page tables, each mapping * is counted separately * * Also, many kernel routines increase the page count before a critical * routine so they can be sure the page doesn't go away from under them. */ /* * Drop a ref, return true if the refcount fell to zero (the page has no users) */ static inline int put_page_testzero(struct page *page) { VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); return page_ref_dec_and_test(page); } /* * Try to grab a ref unless the page has a refcount of zero, return false if * that is the case. * This can be called when MMU is off so it must not access * any of the virtual mappings. */ static inline int get_page_unless_zero(struct page *page) { return page_ref_add_unless(page, 1, 0); } extern int page_is_ram(unsigned long pfn); enum { REGION_INTERSECTS, REGION_DISJOINT, REGION_MIXED, }; int region_intersects(resource_size_t offset, size_t size, unsigned long flags, unsigned long desc); /* Support for virtually mapped pages */ struct page *vmalloc_to_page(const void *addr); unsigned long vmalloc_to_pfn(const void *addr); /* * Determine if an address is within the vmalloc range * * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there * is no special casing required. 
*/ #ifndef is_ioremap_addr #define is_ioremap_addr(x) is_vmalloc_addr(x) #endif #ifdef CONFIG_MMU extern bool is_vmalloc_addr(const void *x); extern int is_vmalloc_or_module_addr(const void *x); #else static inline bool is_vmalloc_addr(const void *x) { return false; } static inline int is_vmalloc_or_module_addr(const void *x) { return 0; } #endif extern void *kvmalloc_node(size_t size, gfp_t flags, int node); static inline void *kvmalloc(size_t size, gfp_t flags) { return kvmalloc_node(size, flags, NUMA_NO_NODE); } static inline void *kvzalloc_node(size_t size, gfp_t flags, int node) { return kvmalloc_node(size, flags | __GFP_ZERO, node); } static inline void *kvzalloc(size_t size, gfp_t flags) { return kvmalloc(size, flags | __GFP_ZERO); } static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return kvmalloc(bytes, flags); } static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) { return kvmalloc_array(n, size, flags | __GFP_ZERO); } extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags); extern void kvfree(const void *addr); extern void kvfree_sensitive(const void *addr, size_t len); static inline int head_compound_mapcount(struct page *head) { return atomic_read(compound_mapcount_ptr(head)) + 1; } /* * Mapcount of compound page as a whole, does not include mapped sub-pages. * * Must be called only for compound pages or any their tail sub-pages. */ static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); return head_compound_mapcount(page); } /* * The atomic page->_mapcount, starts from -1: so that transitions * both from it and to it can be tracked, using atomic_inc_and_test * and atomic_add_negative(-1). */ static inline void page_mapcount_reset(struct page *page) { atomic_set(&(page)->_mapcount, -1); } int __page_mapcount(struct page *page); /* * Mapcount of 0-order page; when compound sub-page, includes * compound_mapcount(). * * Result is undefined for pages which cannot be mapped into userspace. * For example SLAB or special types of pages. See function page_has_type(). * They use this place in struct page differently. */ static inline int page_mapcount(struct page *page) { if (unlikely(PageCompound(page))) return __page_mapcount(page); return atomic_read(&page->_mapcount) + 1; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE int total_mapcount(struct page *page); int page_trans_huge_mapcount(struct page *page, int *total_mapcount); #else static inline int total_mapcount(struct page *page) { return page_mapcount(page); } static inline int page_trans_huge_mapcount(struct page *page, int *total_mapcount) { int mapcount = page_mapcount(page); if (total_mapcount) *total_mapcount = mapcount; return mapcount; } #endif static inline struct page *virt_to_head_page(const void *x) { struct page *page = virt_to_page(x); return compound_head(page); } void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); void copy_huge_page(struct page *dst, struct page *src); /* * Compound pages have a destructor function. Provide a * prototype for that function and accessor functions. * These are _only_ valid on the head of a compound page. 
*/ typedef void compound_page_dtor(struct page *); /* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ enum compound_dtor_id { NULL_COMPOUND_DTOR, COMPOUND_PAGE_DTOR, #ifdef CONFIG_HUGETLB_PAGE HUGETLB_PAGE_DTOR, #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE TRANSHUGE_PAGE_DTOR, #endif NR_COMPOUND_DTORS, }; extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; static inline void set_compound_page_dtor(struct page *page, enum compound_dtor_id compound_dtor) { VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); page[1].compound_dtor = compound_dtor; } static inline void destroy_compound_page(struct page *page) { VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); compound_page_dtors[page[1].compound_dtor](page); } static inline unsigned int compound_order(struct page *page) { if (!PageHead(page)) return 0; return page[1].compound_order; } static inline bool hpage_pincount_available(struct page *page) { /* * Can the page->hpage_pinned_refcount field be used? That field is in * the 3rd page of the compound page, so the smallest (2-page) compound * pages cannot support it. */ page = compound_head(page); return PageCompound(page) && compound_order(page) > 1; } static inline int head_compound_pincount(struct page *head) { return atomic_read(compound_pincount_ptr(head)); } static inline int compound_pincount(struct page *page) { VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); page = compound_head(page); return head_compound_pincount(page); } static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; page[1].compound_nr = 1U << order; } /* Returns the number of pages in this potentially compound page. */ static inline unsigned long compound_nr(struct page *page) { if (!PageHead(page)) return 1; return page[1].compound_nr; } /* Returns the number of bytes in this potentially compound page. */ static inline unsigned long page_size(struct page *page) { return PAGE_SIZE << compound_order(page); } /* Returns the number of bits needed for the number of bytes in a page */ static inline unsigned int page_shift(struct page *page) { return PAGE_SHIFT + compound_order(page); } void free_compound_page(struct page *page); #ifdef CONFIG_MMU /* * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when * servicing faults for write access. In the normal case, do always want * pte_mkwrite. But get_user_pages can cause write faults for mappings * that do not have writing enabled, when used by access_process_vm. */ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) pte = pte_mkwrite(pte); return pte; } vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page); void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr); vm_fault_t finish_fault(struct vm_fault *vmf); vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif /* * Multiple processes may "see" the same page. E.g. for untouched * mappings of /dev/null, all processes see the same page full of * zeroes, and text pages of executables and shared libraries have * only one copy in memory, at most, normally. * * For the non-reserved pages, page_count(page) denotes a reference count. * page_count() == 0 means the page is free. page->lru is then used for * freelist management in the buddy allocator. * page_count() > 0 means the page has been allocated. 
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->i_pages, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

/*
 * Define the bit shifts to access each section. For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
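 *
 * For instance, when NODES_WIDTH is 0, NODES_PGSHIFT and NODES_MASK
 * (defined below) are both 0, so an expression such as
 *
 *	(page->flags >> NODES_PGSHIFT) & NODES_MASK
 *
 * folds to the constant 0 and disappears at compile time.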
*/ #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) #define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ #ifdef NODE_NOT_IN_PAGE_FLAGS #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ SECTIONS_PGOFF : ZONES_PGOFF) #else #define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \ NODES_PGOFF : ZONES_PGOFF) #endif #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) #define NODES_MASK ((1UL << NODES_WIDTH) - 1) #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) static inline enum zone_type page_zonenum(const struct page *page) { ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } #ifdef CONFIG_ZONE_DEVICE static inline bool is_zone_device_page(const struct page *page) { return page_zonenum(page) == ZONE_DEVICE; } extern void memmap_init_zone_device(struct zone *, unsigned long, unsigned long, struct dev_pagemap *); #else static inline bool is_zone_device_page(const struct page *page) { return false; } #endif static inline bool is_zone_movable_page(const struct page *page) { return page_zonenum(page) == ZONE_MOVABLE; } #ifdef CONFIG_DEV_PAGEMAP_OPS void free_devmap_managed_page(struct page *page); DECLARE_STATIC_KEY_FALSE(devmap_managed_key); static inline bool page_is_devmap_managed(struct page *page) { if (!static_branch_unlikely(&devmap_managed_key)) return false; if (!is_zone_device_page(page)) return false; switch (page->pgmap->type) { case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_FS_DAX: return true; default: break; } return false; } void put_devmap_managed_page(struct page *page); #else /* CONFIG_DEV_PAGEMAP_OPS */ static inline bool page_is_devmap_managed(struct page *page) { return false; } static inline void put_devmap_managed_page(struct page *page) { } #endif /* CONFIG_DEV_PAGEMAP_OPS */ static inline bool is_device_private_page(const struct page *page) { return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && IS_ENABLED(CONFIG_DEVICE_PRIVATE) && is_zone_device_page(page) && page->pgmap->type == MEMORY_DEVICE_PRIVATE; } static inline bool is_pci_p2pdma_page(const struct page *page) { return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && IS_ENABLED(CONFIG_PCI_P2PDMA) && is_zone_device_page(page) && page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } /* 127: arbitrary random number, small enough to assemble well */ #define page_ref_zero_or_close_to_overflow(page) \ ((unsigned int) page_ref_count(page) + 127u <= 127u) static inline void get_page(struct page *page) { page = compound_head(page); /* * Getting a normal page or the head of a compound page * requires to already have an elevated page->_refcount. 
*/ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); page_ref_inc(page); } bool __must_check try_grab_page(struct page *page, unsigned int flags); struct page *try_grab_compound_head(struct page *page, int refs, unsigned int flags); static inline __must_check bool try_get_page(struct page *page) { page = compound_head(page); if (WARN_ON_ONCE(page_ref_count(page) <= 0)) return false; page_ref_inc(page); return true; } static inline void put_page(struct page *page) { page = compound_head(page); /* * For devmap managed pages we need to catch refcount transition from * 2 to 1, when refcount reach one it means the page is free and we * need to inform the device driver through callback. See * include/linux/memremap.h and HMM for details. */ if (page_is_devmap_managed(page)) { put_devmap_managed_page(page); return; } if (put_page_testzero(page)) __put_page(page); } /* * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload * the page's refcount so that two separate items are tracked: the original page * reference count, and also a new count of how many pin_user_pages() calls were * made against the page. ("gup-pinned" is another term for the latter). * * With this scheme, pin_user_pages() becomes special: such pages are marked as * distinct from normal pages. As such, the unpin_user_page() call (and its * variants) must be used in order to release gup-pinned pages. * * Choice of value: * * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference * counts with respect to pin_user_pages() and unpin_user_page() becomes * simpler, due to the fact that adding an even power of two to the page * refcount has the effect of using only the upper N bits, for the code that * counts up using the bias value. This means that the lower bits are left for * the exclusive use of the original code that increments and decrements by one * (or at least, by much smaller values than the bias value). * * Of course, once the lower bits overflow into the upper bits (and this is * OK, because subtraction recovers the original values), then visual inspection * no longer suffices to directly view the separate counts. However, for normal * applications that don't have huge page reference counts, this won't be an * issue. * * Locking: the lockless algorithm described in page_cache_get_speculative() * and page_cache_gup_pin_speculative() provides safe operation for * get_user_pages and page_mkclean and other calls that race to set up page * table entries. */ #define GUP_PIN_COUNTING_BIAS (1U << 10) void unpin_user_page(struct page *page); void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty); void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); /** * page_maybe_dma_pinned - Report if a page is pinned for DMA. * @page: The page. * * This function checks if a page has been pinned via a call to * a function in the pin_user_pages() family. * * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, * because it means "definitely not pinned for DMA", but true means "probably * pinned for DMA, but possibly a false positive due to having at least * GUP_PIN_COUNTING_BIAS worth of normal page references". * * False positives are OK, because: a) it's unlikely for a page to get that many * refcounts, and b) all the callers of this routine are expected to be able to * deal gracefully with a false positive. 
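 *
 * For illustration, a caller can therefore err on the safe side, the way
 * fork() does via page_needs_cow_for_dma() below: when in doubt, copy the
 * page up front instead of sharing it copy-on-write (sketch; the helper
 * name is hypothetical):
 *
 *	if (is_cow_mapping(vma->vm_flags) && page_maybe_dma_pinned(page))
 *		return copy_page_now(vma, page);	hypothetical helper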
 *
 * For huge pages, the result will be exactly correct. That's because we have
 * more tracking data available: the 3rd struct page in the compound page is
 * used to track the pincount (instead of the GUP_PIN_COUNTING_BIAS scheme).
 *
 * For more information, please see Documentation/core-api/pin_user_pages.rst.
 *
 * Return: True, if it is likely that the page has been "dma-pinned".
 * False, if the page is definitely not dma-pinned.
 */
static inline bool page_maybe_dma_pinned(struct page *page)
{
	if (hpage_pincount_available(page))
		return compound_pincount(page) > 0;

	/*
	 * page_ref_count() is signed. If that refcount overflows, then
	 * page_ref_count() returns a negative value, and callers will avoid
	 * further incrementing the refcount.
	 *
	 * Here, for that overflow case, use the signed bit to count a little
	 * bit higher via unsigned math, and thus still get an accurate result.
	 */
	return ((unsigned int)page_ref_count(compound_head(page))) >=
		GUP_PIN_COUNTING_BIAS;
}

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * This should most likely only be called during fork() to see whether we
 * should break the cow immediately for a page on the src mm.
 */
static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
					  struct page *page)
{
	if (!is_cow_mapping(vma->vm_flags))
		return false;

	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
		return false;

	return page_maybe_dma_pinned(page);
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
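 *
 * A sketch of the intended use, mirroring the buddy allocator's merge
 * check (illustrative only):
 *
 *	if (page_zone_id(page) == page_zone_id(buddy))
 *		...the two pages may be combined...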
*/ static inline int page_zone_id(struct page *page) { return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; } #ifdef NODE_NOT_IN_PAGE_FLAGS extern int page_to_nid(const struct page *page); #else static inline int page_to_nid(const struct page *page) { struct page *p = (struct page *)page; return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK; } #endif #ifdef CONFIG_NUMA_BALANCING static inline int cpu_pid_to_cpupid(int cpu, int pid) { return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK); } static inline int cpupid_to_pid(int cpupid) { return cpupid & LAST__PID_MASK; } static inline int cpupid_to_cpu(int cpupid) { return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK; } static inline int cpupid_to_nid(int cpupid) { return cpu_to_node(cpupid_to_cpu(cpupid)); } static inline bool cpupid_pid_unset(int cpupid) { return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK); } static inline bool cpupid_cpu_unset(int cpupid) { return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK); } static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) { return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid); } #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS static inline int page_cpupid_xchg_last(struct page *page, int cpupid) { return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); } static inline int page_cpupid_last(struct page *page) { return page->_last_cpupid; } static inline void page_cpupid_reset_last(struct page *page) { page->_last_cpupid = -1 & LAST_CPUPID_MASK; } #else static inline int page_cpupid_last(struct page *page) { return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; } extern int page_cpupid_xchg_last(struct page *page, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ #else /* !CONFIG_NUMA_BALANCING */ static inline int page_cpupid_xchg_last(struct page *page, int cpupid) { return page_to_nid(page); /* XXX */ } static inline int page_cpupid_last(struct page *page) { return page_to_nid(page); /* XXX */ } static inline int cpupid_to_nid(int cpupid) { return -1; } static inline int cpupid_to_pid(int cpupid) { return -1; } static inline int cpupid_to_cpu(int cpupid) { return -1; } static inline int cpu_pid_to_cpupid(int nid, int pid) { return -1; } static inline bool cpupid_pid_unset(int cpupid) { return true; } static inline void page_cpupid_reset_last(struct page *page) { } static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) { return false; } #endif /* CONFIG_NUMA_BALANCING */ #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) /* * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid * setting tags for all pages to native kernel tag value 0xff, as the default * value 0x00 maps to 0xff. 
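 *
 * A worked example of the encoding (tag values chosen arbitrarily for
 * illustration):
 *
 *	stored = tag ^ 0xff
 *	tag 0xff (native kernel tag) -> stored 0x00, the zero-initialised default
 *	tag 0xab                     -> stored 0x54, read back as 0x54 ^ 0xff == 0xab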
*/ static inline u8 page_kasan_tag(const struct page *page) { u8 tag = 0xff; if (kasan_enabled()) { tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; tag ^= 0xff; } return tag; } static inline void page_kasan_tag_set(struct page *page, u8 tag) { unsigned long old_flags, flags; if (!kasan_enabled()) return; tag ^= 0xff; old_flags = READ_ONCE(page->flags); do { flags = old_flags; flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); } static inline void page_kasan_tag_reset(struct page *page) { if (kasan_enabled()) page_kasan_tag_set(page, 0xff); } #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline u8 page_kasan_tag(const struct page *page) { return 0xff; } static inline void page_kasan_tag_set(struct page *page, u8 tag) { } static inline void page_kasan_tag_reset(struct page *page) { } #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline struct zone *page_zone(const struct page *page) { return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; } static inline pg_data_t *page_pgdat(const struct page *page) { return NODE_DATA(page_to_nid(page)); } #ifdef SECTION_IN_PAGE_FLAGS static inline void set_page_section(struct page *page, unsigned long section) { page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; } static inline unsigned long page_to_section(const struct page *page) { return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; } #endif /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION static inline bool is_pinnable_page(struct page *page) { return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) || is_zero_pfn(page_to_pfn(page)); } #else static inline bool is_pinnable_page(struct page *page) { return true; } #endif static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; } static inline void set_page_node(struct page *page, unsigned long node) { page->flags &= ~(NODES_MASK << NODES_PGSHIFT); page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; } static inline void set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn) { set_page_zone(page, zone); set_page_node(page, node); #ifdef SECTION_IN_PAGE_FLAGS set_page_section(page, pfn_to_section_nr(pfn)); #endif } /* * Some inline functions in vmstat.h depend on page_zone() */ #include <linux/vmstat.h> static __always_inline void *lowmem_page_address(const struct page *page) { return page_to_virt(page); } #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) #define HASHED_PAGE_VIRTUAL #endif #if defined(WANT_PAGE_VIRTUAL) static inline void *page_address(const struct page *page) { return page->virtual; } static inline void set_page_address(struct page *page, void *address) { page->virtual = address; } #define page_address_init() do { } while(0) #endif #if defined(HASHED_PAGE_VIRTUAL) void *page_address(const struct page *page); void set_page_address(struct page *page, void *virtual); void page_address_init(void); #endif #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) #define page_address(page) lowmem_page_address(page) #define set_page_address(page, address) do { } while(0) #define page_address_init() do { } while(0) #endif extern void *page_rmapping(struct page *page); extern struct 
anon_vma *page_anon_vma(struct page *page); extern struct address_space *page_mapping(struct page *page); extern struct address_space *__page_file_mapping(struct page *); static inline struct address_space *page_file_mapping(struct page *page) { if (unlikely(PageSwapCache(page))) return __page_file_mapping(page); return page->mapping; } extern pgoff_t __page_file_index(struct page *page); /* * Return the pagecache index of the passed page. Regular pagecache pages * use ->index whereas swapcache pages use swp_offset(->private) */ static inline pgoff_t page_index(struct page *page) { if (unlikely(PageSwapCache(page))) return __page_file_index(page); return page->index; } bool page_mapped(struct page *page); struct address_space *page_mapping(struct page *page); /* * Return true only if the page has been allocated with * ALLOC_NO_WATERMARKS and the low watermark was not * met implying that the system is under some pressure. */ static inline bool page_is_pfmemalloc(const struct page *page) { /* * lru.next has bit 1 set if the page is allocated from the * pfmemalloc reserves. Callers may simply overwrite it if * they do not need to preserve that information. */ return (uintptr_t)page->lru.next & BIT(1); } /* * Only to be called by the page allocator on a freshly allocated * page. */ static inline void set_page_pfmemalloc(struct page *page) { page->lru.next = (void *)BIT(1); } static inline void clear_page_pfmemalloc(struct page *page) { page->lru.next = NULL; } /* * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. */ extern void pagefault_out_of_memory(void); #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) #define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) /* * Flags passed to show_mem() and show_free_areas() to suppress output in * various contexts. */ #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); #ifdef CONFIG_MMU extern bool can_do_mlock(void); #else static inline bool can_do_mlock(void) { return false; } #endif extern int user_shm_lock(size_t, struct ucounts *); extern void user_shm_unlock(size_t, struct ucounts *); /* * Parameter block passed down to zap_pte_range in exceptional cases. 
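 *
 * For illustration, a truncate-style caller that wants to unmap only pages
 * of one mapping within an index range might fill this in as (sketch,
 * hypothetical variable names):
 *
 *	struct zap_details details = {
 *		.check_mapping	= mapping,
 *		.first_index	= start,
 *		.last_index	= end,
 *	};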
*/ struct zap_details { struct address_space *check_mapping; /* Check page->mapping if set */ pgoff_t first_index; /* Lowest page->index to unmap */ pgoff_t last_index; /* Highest page->index to unmap */ struct page *single_page; /* Locked page to be unmapped */ }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size); void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size); void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long start, unsigned long end); struct mmu_notifier_range; void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); int truncate_inode_page(struct address_space *mapping, struct page *page); int generic_error_remove_page(struct address_space *mapping, struct page *page); int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs); extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); void unmap_mapping_page(struct page *page); void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); #else static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs) { /* should never happen if there's no MMU */ BUG(); return VM_FAULT_SIGBUS; } static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { /* should never happen if there's no MMU */ BUG(); return -EFAULT; } static inline void unmap_mapping_page(struct page *page) { } static inline void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows) { } static inline void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) { } #endif static inline void unmap_shared_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen) { unmap_mapping_range(mapping, holebegin, holelen, 0); } extern void vma_do_file_update_time(struct vm_area_struct *, const 
char[], int); extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[], int); extern void vma_do_get_file(struct vm_area_struct *, const char[], int); extern void vma_do_fput(struct vm_area_struct *, const char[], int); #define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \ __LINE__) #define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \ __LINE__) #define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__) #define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__) #ifndef CONFIG_MMU extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int); extern void vmr_do_fput(struct vm_region *, const char[], int); #define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \ __LINE__) #define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__) #endif /* !CONFIG_MMU */ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct task_struct *task, bool bypass_rlim); struct kvec; int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned int offset, unsigned int length); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); void account_page_cleaned(struct page *page, struct address_space *mapping, struct bdi_writeback *wb); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); void __cancel_dirty_page(struct page *page); static inline void cancel_dirty_page(struct page *page) { /* Avoid atomic ops, locking, etc. when not actually needed. 
	 */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}

int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);

/*
 * Flags used by change_protection(). For now we make it a bitmap so
 * that we can pass in multiple flags just like parameters. However, for
 * now all the callers only use one of the flags at a time.
 */
/* Whether we should allow dirty bit accounting */
#define  MM_CP_DIRTY_ACCT		(1UL << 0)
/* Whether this protection change is for NUMA hints */
#define  MM_CP_PROT_NUMA		(1UL << 1)
/* Whether this change is for write protecting */
#define  MM_CP_UFFD_WP			(1UL << 2) /* do wp */
#define  MM_CP_UFFD_WP_RESOLVE		(1UL << 3) /* Resolve wp */
#define  MM_CP_UFFD_WP_ALL		(MM_CP_UFFD_WP | \
					 MM_CP_UFFD_WP_RESOLVE)

extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      unsigned long cp_flags);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);

static inline bool get_user_page_fast_only(unsigned long addr,
			unsigned int gup_flags, struct page **pagep)
{
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative. But a negative value is never what users expect.
*/ if (val < 0) val = 0; #endif return (unsigned long)val; } void mm_trace_rss_stat(struct mm_struct *mm, int member, long count); static inline void add_mm_counter(struct mm_struct *mm, int member, long value) { long count = atomic_long_add_return(value, &mm->rss_stat.count[member]); mm_trace_rss_stat(mm, member, count); } static inline void inc_mm_counter(struct mm_struct *mm, int member) { long count = atomic_long_inc_return(&mm->rss_stat.count[member]); mm_trace_rss_stat(mm, member, count); } static inline void dec_mm_counter(struct mm_struct *mm, int member) { long count = atomic_long_dec_return(&mm->rss_stat.count[member]); mm_trace_rss_stat(mm, member, count); } /* Optimized variant when page is already known not to be PageAnon */ static inline int mm_counter_file(struct page *page) { if (PageSwapBacked(page)) return MM_SHMEMPAGES; return MM_FILEPAGES; } static inline int mm_counter(struct page *page) { if (PageAnon(page)) return MM_ANONPAGES; return mm_counter_file(page); } static inline unsigned long get_mm_rss(struct mm_struct *mm) { return get_mm_counter(mm, MM_FILEPAGES) + get_mm_counter(mm, MM_ANONPAGES) + get_mm_counter(mm, MM_SHMEMPAGES); } static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) { return max(mm->hiwater_rss, get_mm_rss(mm)); } static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) { return max(mm->hiwater_vm, mm->total_vm); } static inline void update_hiwater_rss(struct mm_struct *mm) { unsigned long _rss = get_mm_rss(mm); if ((mm)->hiwater_rss < _rss) (mm)->hiwater_rss = _rss; } static inline void update_hiwater_vm(struct mm_struct *mm) { if (mm->hiwater_vm < mm->total_vm) mm->hiwater_vm = mm->total_vm; } static inline void reset_mm_hiwater_rss(struct mm_struct *mm) { mm->hiwater_rss = get_mm_rss(mm); } static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, struct mm_struct *mm) { unsigned long hiwater_rss = get_mm_hiwater_rss(mm); if (*maxrss < hiwater_rss) *maxrss = hiwater_rss; } #if defined(SPLIT_RSS_COUNTING) void sync_mm_rss(struct mm_struct *mm); #else static inline void sync_mm_rss(struct mm_struct *mm) { } #endif #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #endif #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP static inline int pte_devmap(pte_t pte) { return 0; } #endif int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { pte_t *ptep; __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); return ptep; } #ifdef __PAGETABLE_P4D_FOLDED static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { return 0; } #else int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); #endif #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU) static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) { return 0; } static inline void mm_inc_nr_puds(struct mm_struct *mm) {} static inline void mm_dec_nr_puds(struct mm_struct *mm) {} #else int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); static inline void mm_inc_nr_puds(struct mm_struct *mm) { if (mm_pud_folded(mm)) return; atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); } static inline void mm_dec_nr_puds(struct mm_struct *mm) { if (mm_pud_folded(mm)) 
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else
static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

#if defined(CONFIG_MMU)

static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it into use in between.
	 *
	 * This can happen if an arch tries to use slab for page table
	 * allocations: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
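 *
 * Either way, callers normally take the lock through the helpers defined
 * below; a typical PTE walk looks roughly like this (illustrative sketch):
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	...examine or update *pte...
 *	pte_unmap_unlock(pte, ptl);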
*/ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { return &mm->page_table_lock; } static inline void ptlock_cache_init(void) {} static inline bool ptlock_init(struct page *page) { return true; } static inline void ptlock_free(struct page *page) {} #endif /* USE_SPLIT_PTE_PTLOCKS */ static inline void pgtable_init(void) { ptlock_cache_init(); pgtable_cache_init(); } static inline bool pgtable_pte_page_ctor(struct page *page) { if (!ptlock_init(page)) return false; __SetPageTable(page); inc_lruvec_page_state(page, NR_PAGETABLE); return true; } static inline void pgtable_pte_page_dtor(struct page *page) { ptlock_free(page); __ClearPageTable(page); dec_lruvec_page_state(page, NR_PAGETABLE); } #define pte_offset_map_lock(mm, pmd, address, ptlp) \ ({ \ spinlock_t *__ptl = pte_lockptr(mm, pmd); \ pte_t *__pte = pte_offset_map(pmd, address); \ *(ptlp) = __ptl; \ spin_lock(__ptl); \ __pte; \ }) #define pte_unmap_unlock(pte, ptl) do { \ spin_unlock(ptl); \ pte_unmap(pte); \ } while (0) #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) #define pte_alloc_map(mm, pmd, address) \ (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address)) #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ (pte_alloc(mm, pmd) ? \ NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) #define pte_alloc_kernel(pmd, address) \ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \ NULL: pte_offset_kernel(pmd, address)) #if USE_SPLIT_PMD_PTLOCKS static struct page *pmd_to_page(pmd_t *pmd) { unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); return virt_to_page((void *)((unsigned long) pmd & mask)); } static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) { return ptlock_ptr(pmd_to_page(pmd)); } static inline bool pmd_ptlock_init(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE page->pmd_huge_pte = NULL; #endif return ptlock_init(page); } static inline void pmd_ptlock_free(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE VM_BUG_ON_PAGE(page->pmd_huge_pte, page); #endif ptlock_free(page); } #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) #else static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) { return &mm->page_table_lock; } static inline bool pmd_ptlock_init(struct page *page) { return true; } static inline void pmd_ptlock_free(struct page *page) {} #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) #endif static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) { spinlock_t *ptl = pmd_lockptr(mm, pmd); spin_lock(ptl); return ptl; } static inline bool pgtable_pmd_page_ctor(struct page *page) { if (!pmd_ptlock_init(page)) return false; __SetPageTable(page); inc_lruvec_page_state(page, NR_PAGETABLE); return true; } static inline void pgtable_pmd_page_dtor(struct page *page) { pmd_ptlock_free(page); __ClearPageTable(page); dec_lruvec_page_state(page, NR_PAGETABLE); } /* * No scalability reason to split PUD locks yet, but follow the same pattern * as the PMD locks to make it easier if we decide to. The VM should not be * considered ready to switch to split PUD locks yet; there may be places * which need to be converted from page_table_lock. 
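 *
 * Usage mirrors pmd_lock() above (illustrative sketch):
 *
 *	spinlock_t *ptl = pud_lock(mm, pud);
 *	...operate on the pud entry...
 *	spin_unlock(ptl);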
*/ static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) { return &mm->page_table_lock; } static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) { spinlock_t *ptl = pud_lockptr(mm, pud); spin_lock(ptl); return ptl; } extern void __init pagecache_init(void); extern void __init free_area_init_memoryless_node(int nid); extern void free_initmem(void); /* * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) * into the buddy system. The freed pages will be poisoned with pattern * "poison" if it's within range [0, UCHAR_MAX]. * Return pages freed into the buddy system. */ extern unsigned long free_reserved_area(void *start, void *end, int poison, const char *s); extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(void); extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. */ static inline void free_reserved_page(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); adjust_managed_page_count(page, 1); } #define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) { SetPageReserved(page); adjust_managed_page_count(page, -1); } /* * Default method to free all the __init memory into the buddy system. * The freed pages will be poisoned with pattern "poison" if it's within * range [0, UCHAR_MAX]. * Return pages freed into the buddy system. */ static inline unsigned long free_initmem_default(int poison) { extern char __init_begin[], __init_end[]; return free_reserved_area(&__init_begin, &__init_end, poison, "unused kernel image (initmem)"); } static inline unsigned long get_num_physpages(void) { int nid; unsigned long phys_pages = 0; for_each_online_node(nid) phys_pages += node_present_pages(nid); return phys_pages; } /* * Using memblock node mappings, an architecture may initialise its * zones, allocate the backing mem_map and account for memory holes in an * architecture independent manner. * * An architecture is expected to register range of page frames backed by * physical memory with memblock_add[_node]() before calling * free_area_init() passing in the PFN each zone ends at. 
/*
 * Using memblock node mappings, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in an
 * architecture independent manner.
 *
 * An architecture is expected to register range of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init() passing in the PFN each zone ends at. In basic usage,
 * an architecture is expected to do something like:
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 *                                              max_highmem_pfn};
 * for_each_valid_physical_page_range()
 *      memblock_add_node(base, size, nid)
 * free_area_init(max_zone_pfns);
 */
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
                                      unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
                                           unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
                        unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);

#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
        return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_range(unsigned long, int, unsigned long,
                unsigned long, unsigned long, enum meminit_context,
                struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo *val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
extern bool arch_has_descending_max_zone_pfns(void);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
                              struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                                    struct vm_area_struct *prev,
                                    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
                              struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
                                unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
                                unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)               \
        for (vma = vma_interval_tree_iter_first(root, start, last);     \
             vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
                                  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
        struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)           \
        for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
             avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
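/*
 * Illustrative sketch (not part of the original header): how rmap-style
 * code iterates all file VMAs mapping a page-cache range with
 * vma_interval_tree_foreach() above. The i_mmap rwsem helpers come from
 * <linux/fs.h>; the function name is hypothetical.
 */
static inline void example_for_each_mapping_vma(struct address_space *mapping,
                                                pgoff_t first, pgoff_t last)
{
        struct vm_area_struct *vma;

        i_mmap_lock_read(mapping);      /* protects mapping->i_mmap */
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
                /* vma maps at least one page of [first, last] in the file. */
        }
        i_mmap_unlock_read(mapping);
}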
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
        struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
        return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
        struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
        unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
        unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
        struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        unsigned long addr, unsigned long len, pgoff_t pgoff,
        bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
                                    unsigned long new,
                                    unsigned long start,
                                    unsigned long end_data,
                                    unsigned long start_data)
{
        if (rlim < RLIM_INFINITY) {
                if (((new - start) + (end_data - start_data)) > rlim)
                        return -ENOSPC;
        }

        return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
                                   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags,
                                   const struct vm_special_mapping *spec);
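/*
 * Illustrative sketch (not part of the original header): how brk()-style
 * code in mm/mmap.c uses check_data_rlimit() above. The data-segment
 * accounting is (new_brk - start_brk) + (end_data - start_data), which
 * must stay within RLIMIT_DATA; the wrapper name and the idea of passing
 * the limit in as a parameter are hypothetical (do_brk reads it via
 * rlimit(RLIMIT_DATA)).
 */
static inline int example_may_move_brk(struct mm_struct *mm,
                                       unsigned long new_brk,
                                       unsigned long data_rlimit)
{
        /* Returns -ENOSPC if the combined data size would exceed the limit. */
        return check_data_rlimit(data_rlimit, new_brk, mm->start_brk,
                                 mm->end_data, mm->start_data);
}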
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags, struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);

extern unsigned long get_unmapped_area(struct file *, unsigned long,
                unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
        struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot, unsigned long flags,
        unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
                       struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
                     struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in,
                      int behavior);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
                         int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
        /* Ignore errors */
        (void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
        unsigned long flags;
        unsigned long length;
        unsigned long low_limit;
        unsigned long high_limit;
        unsigned long align_mask;
        unsigned long align_offset;
};

extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
                                       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
                unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm,
                unsigned long addr, struct vm_area_struct **pprev);
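/*
 * Illustrative sketch (not part of the original header): filling in
 * struct vm_unmapped_area_info (declared above) for a top-down search,
 * in the style of arch_get_unmapped_area_topdown(). The limits are
 * assumptions; real callers derive them from the mm and arch-specific
 * bounds, and the function name is hypothetical.
 */
static inline unsigned long example_topdown_search(struct mm_struct *mm,
                                                   unsigned long len)
{
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;  /* search from high to low */
        info.length = len;
        info.low_limit = PAGE_SIZE;             /* assumed lower bound */
        info.high_limit = mm->mmap_base;
        info.align_mask = 0;                    /* no extra alignment */
        info.align_offset = 0;
        return vm_unmapped_area(&info);         /* address or -ENOMEM */
}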
/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
 * start_addr < end_addr.
 */
static inline
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
                                             unsigned long start_addr,
                                             unsigned long end_addr)
{
        struct vm_area_struct *vma = find_vma(mm, start_addr);

        if (vma && end_addr <= vma->vm_start)
                vma = NULL;
        return vma;
}

/**
 * vma_lookup() - Find a VMA at a specific address
 * @mm: The process address space.
 * @addr: The user address.
 *
 * Return: The vm_area_struct at the given address, %NULL otherwise.
 */
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (vma && addr < vma->vm_start)
                vma = NULL;

        return vma;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
        unsigned long vm_start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN) {
                vm_start -= stack_guard_gap;
                if (vm_start > vma->vm_start)
                        vm_start = 0;
        }
        return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
        unsigned long vm_end = vma->vm_end;

        if (vma->vm_flags & VM_GROWSUP) {
                vm_end += stack_guard_gap;
                if (vm_end < vma->vm_end)
                        vm_end = -PAGE_SIZE;
        }
        return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
                                unsigned long vm_start, unsigned long vm_end)
{
        struct vm_area_struct *vma = find_vma(mm, vm_start);

        if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
                vma = NULL;

        return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

void vma_set_file(struct vm_area_struct *vma, struct file *file);

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
                unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
                                unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
                                unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn);
vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
                unsigned long addr, pfn_t pfn);
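/*
 * Illustrative sketch (not part of the original header): a minimal
 * driver-style ->mmap() helper using remap_pfn_range() declared above.
 * Treating vm_pgoff as the target PFN is a convention some char drivers
 * adopt, assumed here; the function name is hypothetical.
 */
static inline int example_mmap_device(struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* Map [vm_pgoff, vm_pgoff + size) into the VMA; 0 or -errno. */
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
                               vma->vm_page_prot);
}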
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start,
                    unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
                        unsigned long addr, struct page *page)
{
        int err = vm_insert_page(vma, addr, page);

        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err < 0 && err != -EBUSY)
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}

#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long pfn,
                                     unsigned long size, pgprot_t prot)
{
        return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif

static inline vm_fault_t vmf_error(int err)
{
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        return VM_FAULT_SIGBUS;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int foll_flags);

#define FOLL_WRITE      0x01    /* check pte is writable */
#define FOLL_TOUCH      0x02    /* mark page accessed */
#define FOLL_GET        0x04    /* do get_page on page */
#define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
#define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
                                 * and return without waiting upon it */
#define FOLL_POPULATE   0x40    /* fault in page */
#define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
#define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
#define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
#define FOLL_TRIED      0x800   /* a retry, previous pass started an IO */
#define FOLL_MLOCK      0x1000  /* lock present pages */
#define FOLL_REMOTE     0x2000  /* we are working on non-current tsk/mm */
#define FOLL_COW        0x4000  /* internal GUP flag */
#define FOLL_ANON       0x8000  /* don't do file mappings */
#define FOLL_LONGTERM   0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD  0x20000 /* split huge pmd before returning */
#define FOLL_PIN        0x40000 /* pages must be released via unpin_user_page */
#define FOLL_FAST_ONLY  0x80000 /* gup_fast: prevent fall-back to slow gup */

/*
 * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
 * other. Here is what they mean, and how to use them:
 *
 * FOLL_LONGTERM indicates that the page will be held for an indefinite time
 * period _often_ under userspace control. This is in contrast to
 * iov_iter_get_pages(), whose usages are transient.
 *
 * FIXME: For pages which are part of a filesystem, mappings are subject to the
 * lifetime enforced by the filesystem and we need guarantees that longterm
 * users like RDMA and V4L2 only establish mappings which coordinate usage with
 * the filesystem. Ideas for this coordination include revoking the longterm
 * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
 * added after the problem with filesystems was found FS DAX VMAs are
 * specifically failed. Filesystem pages are still subject to bugs and use of
 * FOLL_LONGTERM should be avoided on those pages.
 *
 * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
 * Currently only get_user_pages() and get_user_pages_fast() support this flag
 * and calls to get_user_pages_[un]locked are specifically not allowed. This
 * is due to an incompatibility with the FS DAX check and
 * FAULT_FLAG_ALLOW_RETRY.
 *
 * In the CMA case: long term pins in a CMA region would unnecessarily fragment
 * that region. And so, CMA attempts to migrate the page before pinning, when
 * FOLL_LONGTERM is specified.
 *
 * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
 * but an additional pin counting system) will be invoked. This is intended for
 * anything that gets a page reference and then touches page data (for example,
 * Direct IO). This lets the filesystem know that some non-file-system entity is
 * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
 * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
 * a call to unpin_user_page().
 *
 * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
 * and separate refcounting mechanisms, however, and that means that each has
 * its own acquire and release mechanisms:
 *
 * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
 *
 * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages() to release.
 *
 * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
 * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
 * calls applied to them, and that's perfectly OK. This is a constraint on the
 * callers, not on the pages.)
 *
 * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
 * directly by the caller. That's in order to help avoid mismatches when
 * releasing pages: get_user_pages*() pages must be released via put_page(),
 * while pin_user_pages*() pages must be released via unpin_user_page().
 *
 * Please see Documentation/core-api/pin_user_pages.rst for more information.
 */
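/*
 * Illustrative sketch (not part of the original header): the FOLL_PIN
 * acquire/release pairing described above, as Direct-IO-style code uses
 * it. pin_user_pages_fast() sets FOLL_PIN internally; the caller must
 * balance it with unpin_user_pages(), never put_page(). The function
 * name is hypothetical.
 */
static inline int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                          struct page **pages)
{
        int pinned;

        /* Request write access; FOLL_PIN is implied by the pin_* API. */
        pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
        if (pinned <= 0)
                return pinned ? pinned : -EFAULT;

        /* ... touch page data here (DMA, Direct IO, ...) ... */

        unpin_user_pages(pages, pinned);        /* drop the pin counts */
        return 0;
}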
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
        if (vm_fault & VM_FAULT_OOM)
                return -ENOMEM;
        if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
        if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                return -EFAULT;
        return 0;
}

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
                               unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
                                   unsigned long address, unsigned long size,
                                   pte_fn_t fn, void *data);

extern void init_mem_debugging_and_hardening(void);
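/*
 * Illustrative sketch (not part of the original header): converting the
 * vm_fault_t from handle_mm_fault() into an errno with vm_fault_to_errno()
 * above, as GUP-style callers do. Passing FOLL_HWPOISON requests
 * -EHWPOISON instead of -EFAULT for poisoned pages; the wrapper name is
 * hypothetical and retry handling is omitted.
 */
static inline int example_fault_in_addr(struct vm_area_struct *vma,
                                        unsigned long addr, int foll_flags)
{
        vm_fault_t ret;

        /* Fault the page in; the caller must hold the mmap lock. */
        ret = handle_mm_fault(vma, addr, FAULT_FLAG_DEFAULT, NULL);
        return vm_fault_to_errno(ret, foll_flags);
}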
#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
extern bool _page_poisoning_enabled_early;
DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
static inline bool page_poisoning_enabled(void)
{
        return _page_poisoning_enabled_early;
}
/*
 * For use in fast paths after init_mem_debugging() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool page_poisoning_enabled_static(void)
{
        return static_branch_unlikely(&_page_poisoning_enabled);
}
static inline void kernel_poison_pages(struct page *page, int numpages)
{
        if (page_poisoning_enabled_static())
                __kernel_poison_pages(page, numpages);
}
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{
        if (page_poisoning_enabled_static())
                __kernel_unpoison_pages(page, numpages);
}
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline bool page_poisoning_enabled_static(void) { return false; }
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc))
                return true;
        return flags & __GFP_ZERO;
}

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
        return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                   &init_on_free);
}

extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

static inline bool debug_pagealloc_enabled(void)
{
        return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
                _debug_pagealloc_enabled_early;
}

/*
 * For use in fast paths after init_debug_pagealloc() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
        if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                return false;

        return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * To support DEBUG_PAGEALLOC architecture must ensure that
 * __kernel_map_pages() never fails
 */
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
        if (debug_pagealloc_enabled_static())
                __kernel_map_pages(page, numpages, 1);
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
        if (debug_pagealloc_enabled_static())
                __kernel_map_pages(page, numpages, 0);
}
#else   /* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page,
                                             int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page,
                                               int numpages) {}
#endif  /* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}
#endif  /* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
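/*
 * Illustrative sketch (not part of the original header): how a page
 * allocator path consults the debugging hooks and static keys above when
 * handing out fresh pages, loosely modelled on post_alloc_hook() in
 * mm/page_alloc.c. clear_highpage() is assumed from <linux/highmem.h>;
 * the function name is hypothetical.
 */
static inline void example_prep_new_pages(struct page *page, int numpages,
                                          gfp_t gfp)
{
        int i;

        kernel_unpoison_pages(page, numpages);          /* verify poison pattern */
        debug_pagealloc_map_pages(page, numpages);      /* re-map if unmapped while free */

        if (want_init_on_alloc(gfp))                    /* init_on_alloc or __GFP_ZERO */
                for (i = 0; i < numpages; i++)
                        clear_highpage(page + i);       /* zero each page */
}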
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

int vmemmap_remap_free(unsigned long start, unsigned long end,
                       unsigned long reuse);
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
                        unsigned long reuse, gfp_t gfp_mask);

void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
                            struct vmem_altmap *altmap);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
                              struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
                               int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
                                  unsigned long nr_pages);

enum mf_flags {
        MF_COUNT_INCREASED = 1 << 0,
        MF_ACTION_REQUIRED = 1 << 1,
        MF_MUST_KILL = 1 << 2,
        MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
        MF_IGNORED,     /* Error: cannot be handled */
        MF_FAILED,      /* Error: handling failed */
        MF_DELAYED,     /* Will be handled later */
        MF_RECOVERED,   /* Successfully recovered */
};

enum mf_action_page_type {
        MF_MSG_KERNEL,
        MF_MSG_KERNEL_HIGH_ORDER,
        MF_MSG_SLAB,
        MF_MSG_DIFFERENT_COMPOUND,
        MF_MSG_POISONED_HUGE,
        MF_MSG_HUGE,
        MF_MSG_FREE_HUGE,
        MF_MSG_NON_PMD_HUGE,
        MF_MSG_UNMAP_FAILED,
        MF_MSG_DIRTY_SWAPCACHE,
        MF_MSG_CLEAN_SWAPCACHE,
        MF_MSG_DIRTY_MLOCKED_LRU,
        MF_MSG_CLEAN_MLOCKED_LRU,
        MF_MSG_DIRTY_UNEVICTABLE_LRU,
        MF_MSG_CLEAN_UNEVICTABLE_LRU,
        MF_MSG_DIRTY_LRU,
        MF_MSG_CLEAN_LRU,
        MF_MSG_TRUNCATED_LRU,
        MF_MSG_BUDDY,
        MF_MSG_BUDDY_2ND,
        MF_MSG_DAX,
        MF_MSG_UNSPLIT_THP,
        MF_MSG_UNKNOWN,
};
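/*
 * Illustrative sketch (not part of the original header): reporting a
 * corrupted page frame with the mf_flags above, roughly as machine-check
 * code does. A synchronous, user-consumed error must be handled before
 * the task resumes (MF_ACTION_REQUIRED), while interrupt context uses the
 * deferred memory_failure_queue() path; the function name is hypothetical.
 */
static inline void example_report_bad_pfn(unsigned long pfn, bool synchronous)
{
        if (synchronous)
                memory_failure(pfn, MF_ACTION_REQUIRED | MF_MUST_KILL);
        else
                memory_failure_queue(pfn, 0);   /* handled later from work context */
}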
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
                            unsigned long addr_hint,
                            unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned long addr_hint,
                                struct vm_area_struct *vma,
                                unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
                                const void __user *usr_src,
                                unsigned int pages_per_huge_page,
                                bool allow_pagefault);

/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
        return vma_is_dax(vma) || (vma->vm_file &&
                                   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}

#endif  /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
        return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
        return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
        if (!debug_guardpage_enabled())
                return false;

        return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif  /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
        return !memcmp_pages(page1, page2);
}

#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
                                                pgoff_t first_index, pgoff_t nr,
                                                pgoff_t bitmap_pgoff,
                                                unsigned long *bitmap,
                                                pgoff_t *start,
                                                pgoff_t *end);
unsigned long wp_shared_mapping_range(struct address_space *mapping,
                                      pgoff_t first_index, pgoff_t nr);
#endif

extern int sysctl_nr_trim_pages;

#ifdef CONFIG_PRINTK
void mem_dump_obj(void *object);
#else
static inline void mem_dump_obj(void *object) {}
#endif

/**
 * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
 * @seals: the seals to check
 * @vma: the vma to operate on
 *
 * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
 * the vma flags. Return 0 if check pass, or <0 for errors.
 */
static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
{
        if (seals & F_SEAL_FUTURE_WRITE) {
                /*
                 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
                 * "future write" seal active.
                 */
                if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                        return -EPERM;

                /*
                 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
                 * MAP_SHARED and read-only, take care to not allow mprotect to
                 * revert protections on such mappings. Do this only for shared
                 * mappings. For private mappings, don't need to mask
                 * VM_MAYWRITE as we still want them to be COW-writable.
                 */
                if (vma->vm_flags & VM_SHARED)
                        vma->vm_flags &= ~(VM_MAYWRITE);
        }

        return 0;
}

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */