22 hotfixes. 13 are cc:stable and the remainder address post-6.14 issues
or aren't considered necessary for -stable kernels. 19 are for MM.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaDLNqwAKCRDdBJ7gKXxA
juanAQD4aZn7ACTpbIgDIlLVJouq6OOHEYye9hhxz19UN2mAUgEAn8jPqvBDav3S
HxjMFSdgLUQVO03FCs9tpNJchi69nw0=
=R3UI
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2025-05-25-00-58' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "22 hotfixes. 13 are cc:stable and the remainder address post-6.14
  issues or aren't considered necessary for -stable kernels. 19 are for MM"

* tag 'mm-hotfixes-stable-2025-05-25-00-58' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (22 commits)
  mailmap: add Jarkko's employer email address
  mm: fix copy_vma() error handling for hugetlb mappings
  memcg: always call cond_resched() after fn()
  mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios
  mm: vmalloc: only zero-init on vrealloc shrink
  mm: vmalloc: actually use the in-place vrealloc region
  alloc_tag: allocate percpu counters for module tags dynamically
  module: release codetag section when module load fails
  mm/cma: make detection of highmem_start more robust
  MAINTAINERS: add mm memory policy section
  MAINTAINERS: add mm ksm section
  kasan: avoid sleepable page allocation from atomic context
  highmem: add folio_test_partial_kmap()
  MAINTAINERS: add hung-task detector section
  taskstats: fix struct taskstats breaks backward compatibility since version 15
  mm/truncate: fix out-of-bounds when doing a right-aligned split
  MAINTAINERS: add mm reclaim section
  MAINTAINERS: update page allocator section
  mm: fix VM_UFFD_MINOR == VM_SHADOW_STACK on USERFAULTFD=y && ARM64_GCS=y
  mm: mmap: map MAP_STACK to VM_NOHUGEPAGE only if THP is enabled
  ...
commit 0f8c0258bf

23 changed files with 338 additions and 91 deletions
.mailmap (1 changed line)

@@ -313,6 +313,7 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
 Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
+Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@opinsys.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
MAINTAINERS (66 changed lines)

@@ -11106,6 +11106,14 @@ L: linuxppc-dev@lists.ozlabs.org
 S: Odd Fixes
 F: drivers/tty/hvc/
 
+HUNG TASK DETECTOR
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Lance Yang <lance.yang@linux.dev>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: include/linux/hung_task.h
+F: kernel/hung_task.c
+
 I2C ACPI SUPPORT
 M: Mika Westerberg <westeri@kernel.org>
 L: linux-i2c@vger.kernel.org
@@ -15561,6 +15569,41 @@ W: http://www.linux-mm.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 F: mm/gup.c
 
+MEMORY MANAGEMENT - KSM (Kernel Samepage Merging)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Xu Xin <xu.xin16@zte.com.cn>
+R: Chengming Zhou <chengming.zhou@linux.dev>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/admin-guide/mm/ksm.rst
+F: Documentation/mm/ksm.rst
+F: include/linux/ksm.h
+F: include/trace/events/ksm.h
+F: mm/ksm.c
+
+MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Zi Yan <ziy@nvidia.com>
+R: Matthew Brost <matthew.brost@intel.com>
+R: Joshua Hahn <joshua.hahnjy@gmail.com>
+R: Rakie Kim <rakie.kim@sk.com>
+R: Byungchul Park <byungchul@sk.com>
+R: Gregory Price <gourry@gourry.net>
+R: Ying Huang <ying.huang@linux.alibaba.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/linux/mempolicy.h
+F: include/linux/migrate.h
+F: mm/mempolicy.c
+F: mm/migrate.c
+F: mm/migrate_device.c
+
 MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION
 M: Andrew Morton <akpm@linux-foundation.org>
 M: Mike Rapoport <rppt@kernel.org>
@@ -15573,7 +15616,7 @@ F: mm/numa_memblks.c
 
 MEMORY MANAGEMENT - PAGE ALLOCATOR
 M: Andrew Morton <akpm@linux-foundation.org>
-R: Vlastimil Babka <vbabka@suse.cz>
+M: Vlastimil Babka <vbabka@suse.cz>
 R: Suren Baghdasaryan <surenb@google.com>
 R: Michal Hocko <mhocko@suse.com>
 R: Brendan Jackman <jackmanb@google.com>
@@ -15581,10 +15624,25 @@ R: Johannes Weiner <hannes@cmpxchg.org>
 R: Zi Yan <ziy@nvidia.com>
 L: linux-mm@kvack.org
 S: Maintained
+F: include/linux/compaction.h
+F: include/linux/gfp.h
+F: include/linux/page-isolation.h
 F: mm/compaction.c
 F: mm/page_alloc.c
-F: include/linux/gfp.h
-F: include/linux/compaction.h
+F: mm/page_isolation.c
+
+MEMORY MANAGEMENT - RECLAIM
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Johannes Weiner <hannes@cmpxchg.org>
+R: David Hildenbrand <david@redhat.com>
+R: Michal Hocko <mhocko@kernel.org>
+R: Qi Zheng <zhengqi.arch@bytedance.com>
+R: Shakeel Butt <shakeel.butt@linux.dev>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/pt_reclaim.c
+F: mm/vmscan.c
 
 MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
 M: Andrew Morton <akpm@linux-foundation.org>
@@ -25912,7 +25970,7 @@ F: tools/testing/vsock/
 
 VMALLOC
 M: Andrew Morton <akpm@linux-foundation.org>
-R: Uladzislau Rezki <urezki@gmail.com>
+M: Uladzislau Rezki <urezki@gmail.com>
 L: linux-mm@kvack.org
 S: Maintained
 W: http://www.linux-mm.org
include/linux/alloc_tag.h

@@ -104,6 +104,16 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
 
 #else /* ARCH_NEEDS_WEAK_PER_CPU */
 
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag)					\
+	static struct alloc_tag _alloc_tag __used __aligned(8)		\
+	__section(ALLOC_TAG_SECTION_NAME) = {				\
+		.ct = CODE_TAG_INIT,					\
+		.counters = NULL };
+
+#else /* MODULE */
+
 #define DEFINE_ALLOC_TAG(_alloc_tag)					\
 	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
 	static struct alloc_tag _alloc_tag __used __aligned(8)		\
@@ -111,6 +121,8 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
 		.ct = CODE_TAG_INIT,					\
 		.counters = &_alloc_tag_cntr };
 
+#endif /* MODULE */
+
 #endif /* ARCH_NEEDS_WEAK_PER_CPU */
 
 DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
include/linux/codetag.h

@@ -36,10 +36,10 @@ union codetag_ref {
 struct codetag_type_desc {
 	const char *section;
 	size_t tag_size;
-	void (*module_load)(struct codetag_type *cttype,
-			    struct codetag_module *cmod);
-	void (*module_unload)(struct codetag_type *cttype,
-			      struct codetag_module *cmod);
+	void (*module_load)(struct module *mod,
+			    struct codetag *start, struct codetag *end);
+	void (*module_unload)(struct module *mod,
+			      struct codetag *start, struct codetag *end);
 #ifdef CONFIG_MODULES
 	void (*module_replaced)(struct module *mod, struct module *new_mod);
 	bool (*needs_section_mem)(struct module *mod, unsigned long size);
include/linux/highmem.h

@@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
 		const char *from = kmap_local_folio(folio, offset);
 		size_t chunk = len;
 
-		if (folio_test_highmem(folio) &&
+		if (folio_test_partial_kmap(folio) &&
 		    chunk > PAGE_SIZE - offset_in_page(offset))
 			chunk = PAGE_SIZE - offset_in_page(offset);
 		memcpy(to, from, chunk);
@@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
 		char *to = kmap_local_folio(folio, offset);
 		size_t chunk = len;
 
-		if (folio_test_highmem(folio) &&
+		if (folio_test_partial_kmap(folio) &&
 		    chunk > PAGE_SIZE - offset_in_page(offset))
 			chunk = PAGE_SIZE - offset_in_page(offset);
 		memcpy(to, from, chunk);
@@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio,
 {
 	size_t len = folio_size(folio) - offset;
 
-	if (folio_test_highmem(folio)) {
+	if (folio_test_partial_kmap(folio)) {
 		size_t max = PAGE_SIZE - offset_in_page(offset);
 
 		while (len > max) {
@@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
 
 	VM_BUG_ON(offset + len > folio_size(folio));
 
-	if (folio_test_highmem(folio)) {
+	if (folio_test_partial_kmap(folio)) {
 		size_t max = PAGE_SIZE - offset_in_page(offset);
 
 		while (len > max) {
@@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
 	size_t offset = offset_in_folio(folio, pos);
 	char *from = kmap_local_folio(folio, offset);
 
-	if (folio_test_highmem(folio)) {
+	if (folio_test_partial_kmap(folio)) {
 		offset = offset_in_page(offset);
 		len = min_t(size_t, len, PAGE_SIZE - offset);
 	} else
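The hunks above all clamp a copy to the single page that kmap_local_folio() actually
mapped; the new folio_test_partial_kmap() (defined in the page-flags.h hunk further
down) also returns true under CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP, where even
non-highmem folios are mapped one page at a time. The clamp is plain offset
arithmetic. The standalone userspace sketch below is only an illustration: the
PAGE_SIZE macro, buffer sizes and offsets are made up, and memcpy stands in for the
per-page kmap.

/* Userspace illustration of the chunking rule used by memcpy_from_folio():
 * each step copies at most up to the next page boundary. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(off) ((off) & (PAGE_SIZE - 1))

static void copy_chunked(char *to, const char *from, size_t offset, size_t len)
{
	while (len) {
		size_t chunk = len;

		if (chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from + offset, chunk);	/* stand-in for the per-page kmap */
		printf("copied %zu bytes at offset %zu\n", chunk, offset);
		to += chunk;
		offset += chunk;
		len -= chunk;
	}
}

int main(void)
{
	static char src[3 * 4096], dst[3 * 4096];

	copy_chunked(dst, src, 4000, 5000);	/* crosses two page boundaries */
	return 0;
}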
include/linux/hugetlb.h

@@ -275,6 +275,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 bool is_hugetlb_entry_migration(pte_t pte);
 bool is_hugetlb_entry_hwpoisoned(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
@@ -468,6 +469,10 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 
 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #ifndef pgd_write
include/linux/mm.h

@@ -385,7 +385,7 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 
 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT	38
+# define VM_UFFD_MINOR_BIT	41
 # define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
 #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 # define VM_UFFD_MINOR		VM_NONE
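A standalone illustration of the collision this hunk fixes. Per the commit subject,
VM_SHADOW_STACK with USERFAULTFD=y and ARM64_GCS=y resolved to the same bit as the
old VM_UFFD_MINOR_BIT (38); that bit value for the shadow-stack flag is inferred
from the subject line, not from this hunk. The fix moves the userfaultfd flag to
bit 41.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1ULL << (nr))

int main(void)
{
	uint64_t vm_shadow_stack   = BIT(38);	/* arm64 GCS flag (assumed, see above) */
	uint64_t vm_uffd_minor_old = BIT(38);	/* VM_UFFD_MINOR_BIT before the fix */
	uint64_t vm_uffd_minor_new = BIT(41);	/* VM_UFFD_MINOR_BIT after the fix */

	assert(vm_shadow_stack == vm_uffd_minor_old);	/* the aliasing bug */
	assert(!(vm_shadow_stack & vm_uffd_minor_new));	/* distinct flags again */
	printf("old: %#llx, new: %#llx\n",
	       (unsigned long long)vm_uffd_minor_old,
	       (unsigned long long)vm_uffd_minor_new);
	return 0;
}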
include/linux/mman.h

@@ -155,7 +155,9 @@ calc_vm_flag_bits(struct file *file, unsigned long flags)
 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
 	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
 	       _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	       _calc_vm_trans(flags, MAP_STACK,      VM_NOHUGEPAGE) |
+#endif
 	       arch_calc_vm_flag_bits(file, flags);
 }
 
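From userspace nothing changes: MAP_STACK remains a valid mmap() flag whether or not
THP is configured; the hunk only controls whether the resulting VMA is internally
tagged VM_NOHUGEPAGE, a flag that only exists when THP is built in. A minimal sketch
of the userspace side, with an arbitrary 1 MiB size:

/* Map an anonymous region the way thread libraries typically map stacks. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1024 * 1024;
	void *stk = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

	if (stk == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(stk, 0, len);	/* touch it; population is unaffected by the patch */
	munmap(stk, len);
	return 0;
}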
include/linux/page-flags.h

@@ -615,6 +615,13 @@ FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
 PAGEFLAG_FALSE(HighMem, highmem)
 #endif
 
+/* Does kmap_local_folio() only allow access to one page of the folio? */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+#define folio_test_partial_kmap(f)	true
+#else
+#define folio_test_partial_kmap(f)	folio_test_highmem(f)
+#endif
+
 #ifdef CONFIG_SWAP
 static __always_inline bool folio_test_swapcache(const struct folio *folio)
 {
include/linux/percpu.h

@@ -15,11 +15,7 @@
 
 /* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-#define PERCPU_MODULE_RESERVE	(8 << 13)
-#else
 #define PERCPU_MODULE_RESERVE	(8 << 10)
-#endif
 #else
 #define PERCPU_MODULE_RESERVE	0
 #endif
include/uapi/linux/taskstats.h

@@ -34,7 +34,7 @@
  */
 
 
-#define TASKSTATS_VERSION	15
+#define TASKSTATS_VERSION	16
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -72,8 +72,6 @@ struct taskstats {
 	 */
 	__u64	cpu_count __attribute__((aligned(8)));
 	__u64	cpu_delay_total;
-	__u64	cpu_delay_max;
-	__u64	cpu_delay_min;
 
 	/* Following four fields atomically updated using task->delays->lock */
 
@@ -82,14 +80,10 @@ struct taskstats {
 	 */
 	__u64	blkio_count;
 	__u64	blkio_delay_total;
-	__u64	blkio_delay_max;
-	__u64	blkio_delay_min;
 
 	/* Delay waiting for page fault I/O (swap in only) */
 	__u64	swapin_count;
 	__u64	swapin_delay_total;
-	__u64	swapin_delay_max;
-	__u64	swapin_delay_min;
 
 	/* cpu "wall-clock" running time
 	 * On some architectures, value will adjust for cpu time stolen
@@ -172,14 +166,11 @@ struct taskstats {
 	/* Delay waiting for memory reclaim */
 	__u64	freepages_count;
 	__u64	freepages_delay_total;
-	__u64	freepages_delay_max;
-	__u64	freepages_delay_min;
 
 	/* Delay waiting for thrashing page */
 	__u64	thrashing_count;
 	__u64	thrashing_delay_total;
-	__u64	thrashing_delay_max;
-	__u64	thrashing_delay_min;
 
 	/* v10: 64-bit btime to avoid overflow */
 	__u64	ac_btime64;		/* 64-bit begin time */
@@ -187,8 +178,6 @@ struct taskstats {
 	/* v11: Delay waiting for memory compact */
 	__u64	compact_count;
 	__u64	compact_delay_total;
-	__u64	compact_delay_max;
-	__u64	compact_delay_min;
 
 	/* v12 begin */
 	__u32	ac_tgid;	/* thread group ID */
@@ -210,15 +199,37 @@ struct taskstats {
 	/* v13: Delay waiting for write-protect copy */
 	__u64	wpcopy_count;
 	__u64	wpcopy_delay_total;
-	__u64	wpcopy_delay_max;
-	__u64	wpcopy_delay_min;
 
 	/* v14: Delay waiting for IRQ/SOFTIRQ */
 	__u64	irq_count;
 	__u64	irq_delay_total;
-	__u64	irq_delay_max;
-	__u64	irq_delay_min;
-	/* v15: add Delay max */
+
+	/* v15: add Delay max and Delay min */
+
+	/* v16: move Delay max and Delay min to the end of taskstat */
+	__u64	cpu_delay_max;
+	__u64	cpu_delay_min;
+
+	__u64	blkio_delay_max;
+	__u64	blkio_delay_min;
+
+	__u64	swapin_delay_max;
+	__u64	swapin_delay_min;
+
+	__u64	freepages_delay_max;
+	__u64	freepages_delay_min;
+
+	__u64	thrashing_delay_max;
+	__u64	thrashing_delay_min;
+
+	__u64	compact_delay_max;
+	__u64	compact_delay_min;
+
+	__u64	wpcopy_delay_max;
+	__u64	wpcopy_delay_min;
+
+	__u64	irq_delay_max;
+	__u64	irq_delay_min;
 };
 
 
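Why moving the v15 *_delay_max/*_delay_min fields to the end of the struct restores
backward compatibility: old consumers interpret the netlink payload by field offset,
so every field they know about must keep its pre-v15 offset. The toy structs below
are not the real struct taskstats layout; they just demonstrate the offset argument
in runnable form.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats_old {		/* what an old consumer was built against */
	uint64_t cpu_count;
	uint64_t cpu_delay_total;
	uint64_t blkio_count;
};

struct stats_mid_insert {	/* v15 style: new field inserted mid-struct, shifts later offsets */
	uint64_t cpu_count;
	uint64_t cpu_delay_total;
	uint64_t cpu_delay_max;
	uint64_t blkio_count;
};

struct stats_appended {		/* v16 style: new field appended, old offsets preserved */
	uint64_t cpu_count;
	uint64_t cpu_delay_total;
	uint64_t blkio_count;
	uint64_t cpu_delay_max;
};

int main(void)
{
	printf("blkio_count offset: old=%zu mid-insert=%zu appended=%zu\n",
	       offsetof(struct stats_old, blkio_count),
	       offsetof(struct stats_mid_insert, blkio_count),
	       offsetof(struct stats_appended, blkio_count));
	assert(offsetof(struct stats_old, blkio_count) ==
	       offsetof(struct stats_appended, blkio_count));
	return 0;
}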
kernel/module/main.c

@@ -2829,6 +2829,7 @@ static void module_deallocate(struct module *mod, struct load_info *info)
 {
 	percpu_modfree(mod);
 	module_arch_freeing_init(mod);
+	codetag_free_module_sections(mod);
 
 	free_mod_mem(mod);
 }
lib/alloc_tag.c

@@ -350,18 +350,28 @@ static bool needs_section_mem(struct module *mod, unsigned long size)
 	return size >= sizeof(struct alloc_tag);
 }
 
-static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
+static bool clean_unused_counters(struct alloc_tag *start_tag,
+				  struct alloc_tag *end_tag)
 {
-	while (from <= to) {
+	struct alloc_tag *tag;
+	bool ret = true;
+
+	for (tag = start_tag; tag <= end_tag; tag++) {
 		struct alloc_tag_counters counter;
 
-		counter = alloc_tag_read(from);
-		if (counter.bytes)
-			return from;
-		from++;
+		if (!tag->counters)
+			continue;
+
+		counter = alloc_tag_read(tag);
+		if (!counter.bytes) {
+			free_percpu(tag->counters);
+			tag->counters = NULL;
+		} else {
+			ret = false;
+		}
 	}
 
-	return NULL;
+	return ret;
 }
 
 /* Called with mod_area_mt locked */
@@ -371,12 +381,16 @@ static void clean_unused_module_areas_locked(void)
 	struct module *val;
 
 	mas_for_each(&mas, val, module_tags.size) {
+		struct alloc_tag *start_tag;
+		struct alloc_tag *end_tag;
+
 		if (val != &unloaded_mod)
 			continue;
 
 		/* Release area if all tags are unused */
-		if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
-				   (struct alloc_tag *)(module_tags.start_addr + mas.last)))
+		start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
+		end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
+		if (clean_unused_counters(start_tag, end_tag))
 			mas_erase(&mas);
 	}
 }
@@ -561,7 +575,8 @@ unlock:
 static void release_module_tags(struct module *mod, bool used)
 {
 	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
-	struct alloc_tag *tag;
+	struct alloc_tag *start_tag;
+	struct alloc_tag *end_tag;
 	struct module *val;
 
 	mas_lock(&mas);
@@ -575,15 +590,22 @@ static void release_module_tags(struct module *mod, bool used)
 	if (!used)
 		goto release_area;
 
-	/* Find out if the area is used */
-	tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
-			    (struct alloc_tag *)(module_tags.start_addr + mas.last));
-	if (tag) {
-		struct alloc_tag_counters counter = alloc_tag_read(tag);
+	start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
+	end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
+	if (!clean_unused_counters(start_tag, end_tag)) {
+		struct alloc_tag *tag;
 
-		pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
-			tag->ct.filename, tag->ct.lineno, tag->ct.modname,
-			tag->ct.function, counter.bytes);
+		for (tag = start_tag; tag <= end_tag; tag++) {
+			struct alloc_tag_counters counter;
+
+			if (!tag->counters)
+				continue;
+
+			counter = alloc_tag_read(tag);
+			pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
+				tag->ct.filename, tag->ct.lineno, tag->ct.modname,
+				tag->ct.function, counter.bytes);
+		}
 	} else {
 		used = false;
 	}
@@ -596,6 +618,34 @@ out:
 	mas_unlock(&mas);
 }
 
+static void load_module(struct module *mod, struct codetag *start, struct codetag *stop)
+{
+	/* Allocate module alloc_tag percpu counters */
+	struct alloc_tag *start_tag;
+	struct alloc_tag *stop_tag;
+	struct alloc_tag *tag;
+
+	if (!mod)
+		return;
+
+	start_tag = ct_to_alloc_tag(start);
+	stop_tag = ct_to_alloc_tag(stop);
+	for (tag = start_tag; tag < stop_tag; tag++) {
+		WARN_ON(tag->counters);
+		tag->counters = alloc_percpu(struct alloc_tag_counters);
+		if (!tag->counters) {
+			while (--tag >= start_tag) {
+				free_percpu(tag->counters);
+				tag->counters = NULL;
+			}
+			shutdown_mem_profiling(true);
+			pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n",
+			       mod->name);
+			break;
+		}
+	}
+}
+
 static void replace_module(struct module *mod, struct module *new_mod)
 {
 	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
@@ -757,6 +807,7 @@ static int __init alloc_tag_init(void)
 		.needs_section_mem = needs_section_mem,
 		.alloc_section_mem = reserve_module_tags,
 		.free_section_mem = release_module_tags,
+		.module_load = load_module,
 		.module_replaced = replace_module,
 #endif
 	};
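The load_module() hook above allocates one percpu counter per tag and, on the first
allocation failure, walks back over everything it already allocated before disabling
profiling. The same unwind idiom in plain userspace C, with toy resources standing in
for percpu counters (illustration only, not kernel API):

/* Allocate a counter per slot; on failure, release the ones already done. */
#include <stdio.h>
#include <stdlib.h>

struct tag { long *counter; };

static int attach_counters(struct tag *tags, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		tags[i].counter = calloc(1, sizeof(*tags[i].counter));
		if (!tags[i].counter) {
			while (i--) {		/* unwind partial progress */
				free(tags[i].counter);
				tags[i].counter = NULL;
			}
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct tag tags[8] = { 0 };

	if (attach_counters(tags, 8) == 0)
		puts("all counters attached");
	for (size_t i = 0; i < 8; i++)
		free(tags[i].counter);
	return 0;
}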
lib/codetag.c

@@ -194,7 +194,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
 	if (err >= 0) {
 		cttype->count += range_size(cttype, &range);
 		if (cttype->desc.module_load)
-			cttype->desc.module_load(cttype, cmod);
+			cttype->desc.module_load(mod, range.start, range.stop);
 	}
 	up_write(&cttype->mod_lock);
 
@@ -333,7 +333,8 @@ void codetag_unload_module(struct module *mod)
 		}
 		if (found) {
 			if (cttype->desc.module_unload)
-				cttype->desc.module_unload(cttype, cmod);
+				cttype->desc.module_unload(cmod->mod,
+					cmod->range.start, cmod->range.stop);
 
 			cttype->count -= range_size(cttype, &cmod->range);
 			idr_remove(&cttype->mod_idr, mod_id);
mm/cma.c (5 changed lines)

@@ -608,7 +608,10 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
 	 * complain. Find the boundary by adding one to the last valid
 	 * address.
 	 */
-	highmem_start = __pa(high_memory - 1) + 1;
+	if (IS_ENABLED(CONFIG_HIGHMEM))
+		highmem_start = __pa(high_memory - 1) + 1;
+	else
+		highmem_start = memblock_end_of_DRAM();
 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
 		__func__, &size, &base, &limit, &alignment);
 
mm/hugetlb.c (24 changed lines)

@@ -1250,7 +1250,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 /*
  * Reset and decrement one ref on hugepage private reservation.
  * Called with mm->mmap_lock writer semaphore held.
- * This function should be only used by move_vma() and operate on
+ * This function should be only used by mremap and operate on
  * same sized vma. It should never come here with last ref on the
  * reservation.
  */
@@ -2949,12 +2949,20 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 
 	while (start_pfn < end_pfn) {
 		folio = pfn_folio(start_pfn);
+
+		/*
+		 * The folio might have been dissolved from under our feet, so make sure
+		 * to carefully check the state under the lock.
+		 */
+		spin_lock_irq(&hugetlb_lock);
 		if (folio_test_hugetlb(folio)) {
 			h = folio_hstate(folio);
 		} else {
+			spin_unlock_irq(&hugetlb_lock);
 			start_pfn++;
 			continue;
 		}
+		spin_unlock_irq(&hugetlb_lock);
 
 		if (!folio_ref_count(folio)) {
 			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
@@ -7931,3 +7939,17 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
 			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
 }
+
+/*
+ * For hugetlb, mremap() is an odd edge case - while the VMA copying is
+ * performed, we permit both the old and new VMAs to reference the same
+ * reservation.
+ *
+ * We fix this up after the operation succeeds, or if a newly allocated VMA
+ * is closed as a result of a failure to allocate memory.
+ */
+void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+	if (is_vm_hugetlb_page(vma))
+		clear_vma_resv_huge_pages(vma);
+}
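The replace_free_hugepage_folios() hunk follows a common pattern: take the lock,
re-check state that may have changed since it was last observed, and drop the lock
before doing anything that may sleep. A userspace analog with a pthread mutex (toy
flag, nothing hugetlb-specific):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int still_valid = 1;		/* state another thread may change */

static void process_one(void)
{
	int valid;

	pthread_mutex_lock(&lock);
	valid = still_valid;		/* re-check under the lock */
	pthread_mutex_unlock(&lock);	/* drop it before slow or sleeping work */

	if (!valid) {
		puts("state changed under us; skipping");
		return;
	}
	puts("state confirmed; doing the slow part outside the lock");
}

int main(void)
{
	process_one();
	return 0;
}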
mm/kasan/shadow.c

@@ -292,33 +292,99 @@ void __init __weak kasan_populate_early_vm_area_shadow(void *start,
 {
 }
 
+struct vmalloc_populate_data {
+	unsigned long start;
+	struct page **pages;
+};
+
 static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
-				      void *unused)
+				      void *_data)
 {
-	unsigned long page;
+	struct vmalloc_populate_data *data = _data;
+	struct page *page;
 	pte_t pte;
+	int index;
 
 	if (likely(!pte_none(ptep_get(ptep))))
 		return 0;
 
-	page = __get_free_page(GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
-
-	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
-	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
+	index = PFN_DOWN(addr - data->start);
+	page = data->pages[index];
+	__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
+	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 
 	spin_lock(&init_mm.page_table_lock);
 	if (likely(pte_none(ptep_get(ptep)))) {
 		set_pte_at(&init_mm, addr, ptep, pte);
-		page = 0;
+		data->pages[index] = NULL;
 	}
 	spin_unlock(&init_mm.page_table_lock);
-	if (page)
-		free_page(page);
 	return 0;
 }
 
+static void ___free_pages_bulk(struct page **pages, int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pages[i]) {
+			__free_pages(pages[i], 0);
+			pages[i] = NULL;
+		}
+	}
+}
+
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+{
+	unsigned long nr_populated, nr_total = nr_pages;
+	struct page **page_array = pages;
+
+	while (nr_pages) {
+		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+		if (!nr_populated) {
+			___free_pages_bulk(page_array, nr_total - nr_pages);
+			return -ENOMEM;
+		}
+		pages += nr_populated;
+		nr_pages -= nr_populated;
+	}
+
+	return 0;
+}
+
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+{
+	unsigned long nr_pages, nr_total = PFN_UP(end - start);
+	struct vmalloc_populate_data data;
+	int ret = 0;
+
+	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (!data.pages)
+		return -ENOMEM;
+
+	while (nr_total) {
+		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
+		ret = ___alloc_pages_bulk(data.pages, nr_pages);
+		if (ret)
+			break;
+
+		data.start = start;
+		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
+					  kasan_populate_vmalloc_pte, &data);
+		___free_pages_bulk(data.pages, nr_pages);
+		if (ret)
+			break;
+
+		start += nr_pages * PAGE_SIZE;
+		nr_total -= nr_pages;
+	}
+
+	free_page((unsigned long)data.pages);
+
+	return ret;
+}
+
 int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 {
 	unsigned long shadow_start, shadow_end;
@@ -348,9 +414,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = apply_to_page_range(&init_mm, shadow_start,
-				  shadow_end - shadow_start,
-				  kasan_populate_vmalloc_pte, NULL);
+	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
 	if (ret)
 		return ret;
 
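__kasan_populate_vmalloc() above processes the shadow range in batches sized so that
the struct page pointers for one batch fit in a single page. The batching arithmetic
on its own, as a runnable userspace sketch (4096 stands in for PAGE_SIZE and the
"work" is just a printf):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long nr_total = 3000;	/* arbitrary number of items to process */
	unsigned long batch_cap = PAGE_SIZE / sizeof(void *);	/* 512 on LP64 */

	while (nr_total) {
		unsigned long nr = nr_total < batch_cap ? nr_total : batch_cap;

		printf("processing a batch of %lu items\n", nr);
		nr_total -= nr;
	}
	return 0;
}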
mm/memcontrol.c

@@ -1168,7 +1168,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 {
 	struct mem_cgroup *iter;
 	int ret = 0;
-	int i = 0;
 
 	BUG_ON(mem_cgroup_is_root(memcg));
 
@@ -1178,10 +1177,9 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 
 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
 		while (!ret && (task = css_task_iter_next(&it))) {
-			/* Avoid potential softlockup warning */
-			if ((++i & 1023) == 0)
-				cond_resched();
 			ret = fn(task, arg);
+			/* Avoid potential softlockup warning */
+			cond_resched();
 		}
 		css_task_iter_end(&it);
 		if (ret) {
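The hunk above moves the resched point so it runs after every fn() call instead of
once every 1024 iterations. A userspace analog of yielding inside a long scan loop,
with sched_yield() standing in for cond_resched() and a toy fn() (illustration only):

#include <sched.h>
#include <stdio.h>

static int fn(int task)
{
	return task == 100000;		/* stop condition for the toy scan */
}

int main(void)
{
	int ret = 0;

	for (int task = 0; !ret && task <= 100000; task++) {
		ret = fn(task);
		sched_yield();		/* give other runnable work a chance every iteration */
	}
	printf("scan finished, ret=%d\n", ret);
	return 0;
}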
mm/mremap.c

@@ -1188,8 +1188,7 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm,
 		mremap_userfaultfd_prep(new_vma, vrm->uf);
 	}
 
-	if (is_vm_hugetlb_page(vma))
-		clear_vma_resv_huge_pages(vma);
+	fixup_hugetlb_reservations(vma);
 
 	/* Tell pfnmap has moved from this vma */
 	if (unlikely(vma->vm_flags & VM_PFNMAP))
mm/page_alloc.c

@@ -4562,6 +4562,14 @@ restart:
 	}
 
 retry:
+	/*
+	 * Deal with possible cpuset update races or zonelist updates to avoid
+	 * infinite retries.
+	 */
+	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+	    check_retry_zonelist(zonelist_iter_cookie))
+		goto restart;
+
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
 	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
mm/truncate.c

@@ -191,6 +191,7 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 {
 	loff_t pos = folio_pos(folio);
+	size_t size = folio_size(folio);
 	unsigned int offset, length;
 	struct page *split_at, *split_at2;
 
@@ -198,14 +199,13 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 		offset = start - pos;
 	else
 		offset = 0;
-	length = folio_size(folio);
-	if (pos + length <= (u64)end)
-		length = length - offset;
+	if (pos + size <= (u64)end)
+		length = size - offset;
 	else
 		length = end + 1 - pos - offset;
 
 	folio_wait_writeback(folio);
-	if (length == folio_size(folio)) {
+	if (length == size) {
 		truncate_inode_folio(folio->mapping, folio);
 		return true;
 	}
@@ -224,16 +224,20 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 		return true;
 
 	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
-	split_at2 = folio_page(folio,
-			PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
 
 	if (!try_folio_split(folio, split_at, NULL)) {
 		/*
 		 * try to split at offset + length to make sure folios within
 		 * the range can be dropped, especially to avoid memory waste
 		 * for shmem truncate
 		 */
-		struct folio *folio2 = page_folio(split_at2);
+		struct folio *folio2;
+
+		if (offset + length == size)
+			goto no_split;
+
+		split_at2 = folio_page(folio,
+				PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
+		folio2 = page_folio(split_at2);
 
 		if (!folio_try_get(folio2))
 			goto no_split;
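A worked example of the arithmetic above, showing why the right-aligned case is
special: when the truncated range runs exactly to the end of the folio,
offset + length equals the folio size, so a second split target computed at
offset + length would point past the folio's last page. Standalone arithmetic only;
the 2 MiB folio size and the offsets are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long long pos = 0, size = 2ULL << 20;	/* folio at file offset 0, 2 MiB */
	unsigned long long start = 1ULL << 20;		/* truncate from 1 MiB ... */
	unsigned long long end = (2ULL << 20) - 1;	/* ... through the last byte */
	unsigned long long offset = start > pos ? start - pos : 0;
	unsigned long long length;

	if (pos + size <= end)
		length = size - offset;
	else
		length = end + 1 - pos - offset;

	printf("offset=%llu length=%llu offset+length=%llu size=%llu\n",
	       offset, length, offset + length, size);
	if (offset + length == size)
		puts("right-aligned: no tail beyond the range, skip the second split");
	return 0;
}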
mm/vma.c (1 changed line)

@@ -1834,6 +1834,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	return new_vma;
 
 out_vma_link:
+	fixup_hugetlb_reservations(new_vma);
 	vma_close(new_vma);
 
 	if (new_vma->vm_file)
mm/vmalloc.c (13 changed lines)

@@ -4093,8 +4093,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	 * would be a good heuristic for when to shrink the vm_area?
 	 */
 	if (size <= old_size) {
-		/* Zero out "freed" memory. */
-		if (want_init_on_free())
+		/* Zero out "freed" memory, potentially for future realloc. */
+		if (want_init_on_free() || want_init_on_alloc(flags))
 			memset((void *)p + size, 0, old_size - size);
 		vm->requested_size = size;
 		kasan_poison_vmalloc(p + size, old_size - size);
@@ -4107,10 +4107,13 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	if (size <= alloced_size) {
 		kasan_unpoison_vmalloc(p + old_size, size - old_size,
 				       KASAN_VMALLOC_PROT_NORMAL);
-		/* Zero out "alloced" memory. */
-		if (want_init_on_alloc(flags))
-			memset((void *)p + old_size, 0, size - old_size);
+		/*
+		 * No need to zero memory here, as unused memory will have
+		 * already been zeroed at initial allocation time or during
+		 * realloc shrink time.
+		 */
 		vm->requested_size = size;
+		return (void *)p;
 	}
 
 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
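Taken together, the two vrealloc hunks make the shrink path the point where the tail
is cleared, so a later in-place grow can return the same pointer without zeroing.
A userspace analog of that invariant with a plain buffer; the toy wrapper zeroes
unconditionally on shrink, which corresponds to the case where init-on-free or
init-on-alloc is enabled (not the kernel API, names are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *p;
	size_t alloced;		/* capacity, fixed after the first allocation */
	size_t used;		/* currently requested size */
};

static void shrink(struct buf *b, size_t size)
{
	/* zero the "freed" tail now so a future grow can reuse it as-is */
	memset(b->p + size, 0, b->used - size);
	b->used = size;
}

static int grow_in_place(struct buf *b, size_t size)
{
	if (size > b->alloced)
		return -1;	/* would need a real reallocation */
	b->used = size;		/* tail was already zeroed by the earlier shrink */
	return 0;
}

int main(void)
{
	struct buf b = { .p = calloc(1, 256), .alloced = 256, .used = 256 };

	if (!b.p)
		return 1;
	memset(b.p, 0xaa, 256);		/* dirty the buffer */
	shrink(&b, 64);
	if (grow_in_place(&b, 200) == 0)
		printf("byte 100 after regrow: %#x (still zero)\n", b.p[100]);
	free(b.p);
	return 0;
}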