Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "There are two memory management related changes, the CMMA support for
  KVM to avoid swap-in of freed pages and the split page table lock for
  the PMD level. These two come with common code changes in mm/.

  A fix for the long standing theoretical TLB flush problem, this one
  comes with a common code change in kernel/sched/.

  Another set of changes is Heikos uaccess work, included is the initial
  set of patches with more to come.

  And fixes and cleanups as usual"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (36 commits)
  s390/con3270: optionally disable auto update
  s390/mm: remove unecessary parameter from pgste_ipte_notify
  s390/mm: remove unnecessary parameter from gmap_do_ipte_notify
  s390/mm: fixing comment so that parameter name match
  s390/smp: limit number of cpus in possible cpu mask
  hypfs: Add clarification for "weight_min" attribute
  s390: update defconfigs
  s390/ptrace: add support for PTRACE_SINGLEBLOCK
  s390/perf: make print_debug_cf() static
  s390/topology: Remove call to update_cpu_masks()
  s390/compat: remove compat exec domain
  s390: select CONFIG_TTY for use of tty in unconditional keyboard driver
  s390/appldata_os: fix cpu array size calculation
  s390/checksum: remove memset() within csum_partial_copy_from_user()
  s390/uaccess: remove copy_from_user_real()
  s390/sclp_early: Return correct HSA block count also for zero
  s390: add some drivers/subsystems to the MAINTAINERS file
  s390: improve debug feature usage
  s390/airq: add support for irq ranges
  s390/mm: enable split page table lock for PMD level
  ...
commit 1f8c538ed6
61 changed files with 944 additions and 428 deletions
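A note on the TLB flush fix mentioned above: the mmu_context.h and pgtable.h hunks below track, in mm->context.attach_count, how many CPUs have the address space attached (lower 16 bits) and how many flush operations are in flight (upper 16 bits). A pte/pmd invalidation may then be deferred when at most the current CPU is attached, while switch_mm() delays update_mm() via TIF_TLB_WAIT as long as flushes are pending. Below is a minimal user-space sketch of just that decision; it assumes only what those hunks show, and the plain int counter and helper names are illustrative, not the kernel's atomics.

/* Sketch: model of the attach_count based "lazy vs. direct" flush decision. */
#include <stdio.h>

static int attach_count;	/* stand-in for mm->context.attach_count */

/* Returns 1 if the invalidation may be deferred (only the current CPU is
 * attached to the mm), 0 if a direct IPTE/IDTE flush is required. */
static int flush_can_be_lazy(int mm_is_active_here)
{
	int active = mm_is_active_here ? 1 : 0;
	int count;

	attach_count += 0x10000;	/* announce a flush in progress */
	count = attach_count;
	attach_count -= 0x10000;

	return (count & 0xffff) <= active;
}

int main(void)
{
	attach_count = 1;	/* one attached CPU: deferring is safe */
	printf("one user   : %s\n", flush_can_be_lazy(1) ? "lazy" : "direct");

	attach_count = 3;	/* three attached CPUs: flush immediately */
	printf("three users: %s\n", flush_can_be_lazy(1) ? "lazy" : "direct");
	return 0;
}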
@@ -44,11 +44,21 @@ struct airq_iv {
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
void airq_iv_release(struct airq_iv *iv);
unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
			   unsigned long end);

static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
{
	return airq_iv_alloc(iv, 1);
}

static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
{
	airq_iv_free(iv, bit, 1);
}

static inline unsigned long airq_iv_end(struct airq_iv *iv)
{
	return iv->end;

@@ -13,9 +13,9 @@
 *
 * The bitop functions are defined to work on unsigned longs, so for an
 * s390x system the bits end up numbered:
 *   |63..............0|127............64|191...........128|255...........196|
 *   |63..............0|127............64|191...........128|255...........192|
 * and on s390:
 *   |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
 *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 *
 * There are a few little-endian macros used mostly for filesystem
 * bitmaps, these work on similar bit arrays layouts, but

@@ -30,7 +30,7 @@
 * on an s390x system the bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 * and on s390:
 *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
 *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
 *
 * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
 * number field needs to be reversed compared to the LSB0 encoded bit

@@ -304,7 +304,7 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
 * On an s390x system the bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 * and on s390:
 *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
 *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,

@@ -219,7 +219,9 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)

extern struct ccw_device *ccw_device_probe_console(void);
extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
extern void ccw_device_destroy_console(struct ccw_device *);
extern int ccw_device_enable_console(struct ccw_device *);
extern void ccw_device_wait_idle(struct ccw_device *);
extern int ccw_device_force_console(struct ccw_device *);

@@ -44,22 +44,15 @@ csum_partial(const void *buff, int len, __wsum sum)
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 *
 * Copy from userspace and compute checksum. If we catch an exception
 * then zero the rest of the buffer.
 * Copy from userspace and compute checksum.
 */
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum sum,
			    int *err_ptr)
{
	int missing;

	missing = copy_from_user(dst, src, len);
	if (missing) {
		memset(dst + len - missing, 0, missing);
	if (unlikely(copy_from_user(dst, src, len)))
		*err_ptr = -EFAULT;
	}

	return csum_partial(dst, len, sum);
}

@@ -5,7 +5,10 @@
#include <linux/uaccess.h>
#include <asm/errno.h>

static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);

static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;

@@ -17,7 +20,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
	oparg = 1 << oparg;

	pagefault_disable();
	ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
	pagefault_enable();

	if (!ret) {

@@ -34,10 +37,4 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
	return ret;
}

static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 newval)
{
	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
}

#endif /* _ASM_S390_FUTEX_H */

@@ -106,7 +106,9 @@ struct kvm_s390_sie_block {
	__u64	gbea;			/* 0x0180 */
	__u8	reserved188[24];	/* 0x0188 */
	__u32	fac;			/* 0x01a0 */
	__u8	reserved1a4[68];	/* 0x01a4 */
	__u8	reserved1a4[20];	/* 0x01a4 */
	__u64	cbrlo;			/* 0x01b8 */
	__u8	reserved1c0[40];	/* 0x01c0 */
	__u64	itdba;			/* 0x01e8 */
	__u8	reserved1f0[16];	/* 0x01f0 */
} __attribute__((packed));

@@ -155,6 +157,7 @@ struct kvm_vcpu_stat {
	u32 instruction_stsi;
	u32 instruction_stfl;
	u32 instruction_tprot;
	u32 instruction_essa;
	u32 instruction_sigp_sense;
	u32 instruction_sigp_sense_running;
	u32 instruction_sigp_external_call;

@@ -48,13 +48,42 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	update_mm(next, tsk);
	int cpu = smp_processor_id();

	if (prev == next)
		return;
	if (atomic_inc_return(&next->context.attach_count) >> 16) {
		/* Delay update_mm until all TLB flushes are done. */
		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
	} else {
		cpumask_set_cpu(cpu, mm_cpumask(next));
		update_mm(next, tsk);
		if (next->context.flush_mm)
			/* Flush pending TLBs */
			__tlb_flush_mm(next);
	}
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	atomic_inc(&next->context.attach_count);
	/* Check for TLBs not flushed yet */
	__tlb_flush_mm_lazy(next);
}

#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
		return;
	preempt_disable();
	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
	while (atomic_read(&mm->context.attach_count) >> 16)
		cpu_relax();

	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	update_mm(mm, tsk);
	if (mm->context.flush_mm)
		__tlb_flush_mm(mm);
	preempt_enable();
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)

@@ -22,6 +22,7 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *);

void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq);

@@ -91,11 +92,22 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{

@@ -229,6 +229,7 @@ extern unsigned long MODULES_END;
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */

@@ -394,6 +395,12 @@ extern unsigned long MODULES_END;

#endif /* CONFIG_64BIT */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control

@@ -617,6 +624,14 @@ static inline int pte_none(pte_t pte)
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_file(pte_t pte)
{
	/* Bit pattern: (pte & 0x601) == 0x600 */

@@ -821,20 +836,20 @@ unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
void __gmap_zap(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
void gmap_do_ipte_notify(struct mm_struct *, pte_t *);

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
		gmap_do_ipte_notify(mm, ptep);
	}
#endif
	return pgste;

@@ -852,6 +867,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);

@@ -881,6 +897,12 @@ static inline int pte_young(pte_t pte)
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

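A note on the CMMA-related hunks above: _PAGE_UNUSED (0x080) and pte_unused() let common mm/ code recognize pages the KVM guest has already marked unused, so reclaim can simply drop them instead of writing them to swap (a later hunk in this file sets the bit in ptep_clear_flush() when the guest usage state is _PGSTE_GPS_USAGE_UNUSED). Below is a rough user-space illustration of that check; the pte_t stand-in and the reclaim_action() helper are made up for the example.

/* Sketch: how a reclaim path could act on a pte_unused()-style bit. */
#include <stdio.h>

#define _PAGE_UNUSED 0x080UL	/* SW bit, as defined in the hunk above */

typedef struct { unsigned long pte_val; } pte_t;

static int pte_unused(pte_t pte)
{
	return (pte.pte_val & _PAGE_UNUSED) != 0;
}

/* A page the guest has freed can be discarded; swapping it out (and later
 * swapping it back in) would be wasted work. */
static const char *reclaim_action(pte_t pte)
{
	return pte_unused(pte) ? "discard, no swap" : "swap out";
}

int main(void)
{
	pte_t freed_by_guest = { _PAGE_UNUSED };
	pte_t still_in_use = { 0 };

	printf("freed page: %s\n", reclaim_action(freed_by_guest));
	printf("used page : %s\n", reclaim_action(still_in_use));
	return 0;
}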
@@ -1034,30 +1056,41 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
		asm volatile(
			"	ipte %2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	__ptep_ipte(address, ptep);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active = (mm == current->active_mm) ? 1 : 0;
	int active, count;

	if (atomic_read(&mm->context.attach_count) > active)
		__ptep_ipte(address, ptep);
	else
	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

@@ -1070,11 +1103,11 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	pte = *ptep;
	__ptep_ipte(addr, ptep);
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

@@ -1116,7 +1149,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		pgste = pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;

@@ -1140,12 +1173,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
		pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) |= _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);

@@ -1178,14 +1210,17 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	pte = *ptep;
	__ptep_ipte(address, ptep);
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}

@@ -1209,7 +1244,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		pgste = pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;

@@ -1234,7 +1269,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
			pgste = pgste_ipte_notify(mm, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);

@@ -1260,10 +1295,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	__ptep_ipte(address, ptep);
	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);

@@ -1447,12 +1482,16 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active = (mm == current->active_mm) ? 1 : 0;
	int active, count;

	if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
		__pmd_idte(address, pmdp);
	else
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else
		__pmd_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

@@ -83,6 +83,7 @@ struct per_struct_kernel {
 * These are defined as per linux/ptrace.h, which see.
 */
#define arch_has_single_step()	(1)
#define arch_has_block_step()	(1)

#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)

@@ -46,6 +46,7 @@ int sclp_cpu_configure(u8 cpu);
int sclp_cpu_deconfigure(u8 cpu);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
unsigned int sclp_get_max_cpu(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);

@@ -59,7 +59,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_FLAG_DIAG44	(1UL << 4)
#define MACHINE_FLAG_IDTE	(1UL << 5)
#define MACHINE_FLAG_DIAG9C	(1UL << 6)
#define MACHINE_FLAG_MVCOS	(1UL << 7)
#define MACHINE_FLAG_KVM	(1UL << 8)
#define MACHINE_FLAG_ESOP	(1UL << 9)
#define MACHINE_FLAG_EDAT1	(1UL << 10)

@@ -85,7 +84,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_IDTE	(0)
#define MACHINE_HAS_DIAG44	(1)
#define MACHINE_HAS_MVPG	(S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
#define MACHINE_HAS_MVCOS	(0)
#define MACHINE_HAS_EDAT1	(0)
#define MACHINE_HAS_EDAT2	(0)
#define MACHINE_HAS_LPP		(0)

@@ -98,7 +96,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_IDTE	(S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_DIAG44	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
#define MACHINE_HAS_MVPG	(1)
#define MACHINE_HAS_MVCOS	(S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
#define MACHINE_HAS_EDAT1	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
#define MACHINE_HAS_LPP		(S390_lowcore.machine_flags & MACHINE_FLAG_LPP)

@@ -81,6 +81,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
#define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
#define TIF_MCCK_PENDING	7	/* machine check handling is pending */
#define TIF_SYSCALL_TRACE	8	/* syscall trace active */

@@ -91,11 +92,13 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
#define TIF_SINGLE_STEP		20	/* This task is single stepped */
#define TIF_BLOCK_STEP		21	/* This task is block stepped */

#define _TIF_SYSCALL		(1<<TIF_SYSCALL)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
#define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)

@@ -92,33 +92,58 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct uaccess_ops {
	size_t (*copy_from_user)(size_t, const void __user *, void *);
	size_t (*copy_to_user)(size_t, void __user *, const void *);
	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
	size_t (*clear_user)(size_t, void __user *);
	size_t (*strnlen_user)(size_t, const char __user *);
	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
};
int __handle_fault(unsigned long, unsigned long, int);

extern struct uaccess_ops uaccess;
extern struct uaccess_ops uaccess_mvcos;
extern struct uaccess_ops uaccess_pt;
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

extern int __handle_fault(unsigned long, unsigned long, int);
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = uaccess.copy_to_user(size, ptr, x);
	return size ? -EFAULT : size;
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = uaccess.copy_from_user(size, ptr, x);
	return size ? -EFAULT : size;
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

/*

@@ -135,8 +160,8 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		__pu_err = __put_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		break; \
	default: \
		__put_user_bad(); \

@@ -152,7 +177,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
})

extern int __put_user_bad(void) __attribute__((noreturn));
int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr) \
({ \

@@ -161,29 +186,29 @@ extern int __put_user_bad(void) __attribute__((noreturn));
	switch (sizeof(*(ptr))) { \
	case 1: { \
		unsigned char __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 2: { \
		unsigned short __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 4: { \
		unsigned int __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 8: { \
		unsigned long long __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \

@@ -200,34 +225,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
	__get_user(x, ptr); \
})

extern int __get_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return uaccess.copy_to_user(n, to, from);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.

@@ -248,30 +250,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
	return __copy_to_user(to, from, n);
}

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return uaccess.copy_from_user(n, from, to);
}

extern void copy_from_user_overflow(void)
void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif

@@ -306,11 +285,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
	return __copy_from_user(to, from, n);
}

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return uaccess.copy_in_user(n, to, from);
}
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)

@@ -322,18 +298,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return uaccess.strncpy_from_user(count, src, dst);
	return __strncpy_from_user(dst, src, count);
}

static inline unsigned long
strnlen_user(const char __user * src, unsigned long n)
unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return uaccess.strnlen_user(n, src);
	return __strnlen_user(src, n);
}

/**

@@ -355,21 +335,14 @@ strnlen_user(const char __user * src, unsigned long n)
/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check
__clear_user(void __user *to, unsigned long n)
{
	return uaccess.clear_user(n, to);
}

static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return uaccess.clear_user(n, to);
	return __clear_user(to, n);
}

extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);
int copy_to_user_real(void __user *dest, void *src, unsigned long count);

#endif /* __S390_UACCESS_H */