Merge branch 'hwpoison-hugepages' into hwpoison
Conflicts: mm/memory-failure.c
commit 46e387bbd8
10 changed files with 551 additions and 125 deletions
include/linux/hugetlb.h
@@ -43,7 +43,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						struct vm_area_struct *vma,
 						int acctflags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
-void __isolate_hwpoisoned_huge_page(struct page *page);
+int dequeue_hwpoisoned_huge_page(struct page *page);
+void copy_huge_page(struct page *dst, struct page *src);

 extern unsigned long hugepages_treat_as_movable;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -101,7 +102,10 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
 #define huge_pte_offset(mm, address) 0
-#define __isolate_hwpoisoned_huge_page(page) 0
+#define dequeue_hwpoisoned_huge_page(page) 0
+static inline void copy_huge_page(struct page *dst, struct page *src)
+{
+}

 #define hugetlb_change_protection(vma, address, end, newprot)
@@ -228,6 +232,8 @@ struct huge_bootmem_page {
 	struct hstate *hstate;
 };

+struct page *alloc_huge_page_node(struct hstate *h, int nid);
+
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -301,8 +307,14 @@ static inline struct hstate *page_hstate(struct page *page)
 	return size_to_hstate(PAGE_SIZE << compound_order(page));
 }

+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+	return hstates[index].order + PAGE_SHIFT;
+}
+
 #else
 struct hstate {};
+#define alloc_huge_page_node(h, nid) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_vma(v) NULL
@@ -317,6 +329,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 {
 	return 1;
 }
+#define hstate_index_to_shift(index) 0
 #endif

 #endif /* _LINUX_HUGETLB_H */
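A note on the hunk above: the new hstate_index_to_shift() helper only turns a huge page's hstate order into a page shift (order + PAGE_SHIFT), which the fault path can later hand back via the hindex encoding in mm.h below. The following is a minimal user-space sketch of that arithmetic only; PAGE_SHIFT = 12 and the orders are assumptions for x86_64 with 4 KB base pages, and the stub names are hypothetical, not kernel symbols.

/* Sketch only: mirrors the order + PAGE_SHIFT arithmetic of the new
 * hstate_index_to_shift(); values below are assumed, the real ones
 * come from the kernel's hstates[] table. */
#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumed: 4 KB base pages */

struct hstate_stub { unsigned order; };    /* hypothetical stand-in for struct hstate */

static struct hstate_stub hstates_stub[] = {
	{ .order = 9 },                    /* 2 MB huge pages: 4 KB << 9  */
	{ .order = 18 },                   /* 1 GB huge pages: 4 KB << 18 */
};

static unsigned hstate_index_to_shift_stub(unsigned index)
{
	/* same arithmetic as the new inline helper */
	return hstates_stub[index].order + PAGE_SHIFT;
}

int main(void)
{
	for (unsigned i = 0; i < 2; i++)
		printf("hstate %u -> shift %u (%lu bytes)\n", i,
		       hstate_index_to_shift_stub(i),
		       1UL << hstate_index_to_shift_stub(i));
	return 0;
}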
include/linux/migrate.h
@@ -14,6 +14,8 @@ extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
 			unsigned long private, int offlining);
+extern int migrate_huge_pages(struct list_head *l, new_page_t x,
+			unsigned long private, int offlining);

 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -23,12 +25,17 @@ extern int migrate_prep_local(void);
 extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
+extern void migrate_page_copy(struct page *newpage, struct page *page);
+extern int migrate_huge_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page);
 #else
 #define PAGE_MIGRATION 0

 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, int offlining) { return -ENOSYS; }
+static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
+		unsigned long private, int offlining) { return -ENOSYS; }

 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
@@ -40,6 +47,15 @@ static inline int migrate_vmas(struct mm_struct *mm,
 	return -ENOSYS;
 }

+static inline void migrate_page_copy(struct page *newpage,
+		struct page *page) {}
+
+static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page)
+{
+	return -ENOSYS;
+}
+
 /* Possible settings for the migrate_page() method in address_operations */
 #define migrate_page NULL
 #define fail_migrate_page NULL
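The migrate.h hunks follow the header's existing convention: each new entry point (migrate_huge_pages(), migrate_page_copy(), migrate_huge_page_move_mapping()) gets a real declaration under CONFIG_MIGRATION and a static inline stub that does nothing or returns -ENOSYS when migration is compiled out, so callers never need their own #ifdefs. A compile-time sketch of that pattern with a hypothetical CONFIG_THING/do_thing() pair, not kernel code:

/* Sketch only: the "real declaration vs. static inline -ENOSYS stub"
 * pattern used above. CONFIG_THING and do_thing() are hypothetical. */
#include <errno.h>
#include <stdio.h>

#ifdef CONFIG_THING
int do_thing(int arg);                 /* real implementation lives elsewhere */
#else
static inline int do_thing(int arg)    /* stub: feature compiled out */
{
	(void)arg;
	return -ENOSYS;
}
#endif

int main(void)
{
	/* Callers use do_thing() unconditionally; with the feature disabled
	 * they simply get -ENOSYS, like migrate_huge_pages() without
	 * CONFIG_MIGRATION. */
	printf("do_thing() -> %d\n", do_thing(0));
	return 0;
}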
include/linux/mm.h
@@ -718,12 +718,20 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_SIGBUS 0x0002
 #define VM_FAULT_MAJOR 0x0004
 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
-#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */
+#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
+#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */

 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */

-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
+#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+			 VM_FAULT_HWPOISON_LARGE)
+
+/* Encode hstate index for a hwpoisoned large page */
+#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

 /*
  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
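The mm.h hunk packs the hstate index of a poisoned huge page into bits 12 to 15 of the fault return code (VM_FAULT_HWPOISON_LARGE_MASK), so the architecture fault handler can recover the huge page size when it raises SIGBUS. A small user-space round-trip check; the two SET/GET macros are taken verbatim from the hunk above, while the surrounding harness is illustrative only:

/* Round-trip check for the hindex encoding; only the macros come from
 * the patch, the rest is an illustrative user-space harness. */
#include <assert.h>
#include <stdio.h>

#define VM_FAULT_HWPOISON_LARGE      0x0020
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

int main(void)
{
	for (unsigned idx = 0; idx < 16; idx++) {
		unsigned ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(idx);

		/* the index survives the round trip and stays inside the mask */
		assert(VM_FAULT_GET_HINDEX(ret) == idx);
		assert((VM_FAULT_SET_HINDEX(idx) & ~VM_FAULT_HWPOISON_LARGE_MASK) == 0);
	}
	printf("hindex encode/decode round-trips for indices 0..15\n");
	return 0;
}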