This is the 3.10.30 stable release

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQIcBAABAgAGBQJS/T2oAAoJEDjbvchgkmk+St0QAMSLN95GKAIRjOMpkz9VAncw
gw5KdMFYcdT4pQAIX6/yIpv9o+YVDNFjvtGv2Jlrp+EZDLFSsTG/kvlgYN/SrkkM
Lr4X6sC4yNh5LD/nTtwZE2KagKwydIPedOqKXlwW7in4xK6Y8BvUN+9YMTujO8nS
VyDKAdKHYIDtWoAYgn5uYj9RSwvYLKJuYqs7FWQvoluyWNbPc/rS0J1k/dcOO41i
dNwvDMBniJ3xx/upg/RhZ9U+bIlNE2xQOYSBLXBZzWcl4YsZN/6O26oXpsz7fUpK
lL3fqldCArCAgHr4hYBVcXantrQ3gRPM1xA3cX19eqbdSempQlSJQgiCeN+mJYuR
c7vMM1fxL1kwHqv7sRyS9LaumspHX6AnqKm5eir60Vz7QCiHT0WBt65hnwlB8eRy
pt+W1YKyPSW9FVDkNuu4I2u+V985HB3H4Fo4wNrF3pV/6JRhIsNWaGKeDIKGJqDT
AeEhL+J+0qYlp8TOBSSUChHJQg6NRO0JoXnJV58dBDl6FrIQHfsAskZv1M/BuSax
poW2Hg9AgmrIDvL7Mcn45mmspH/93673PO+58AK2PRfnjaZ/50GuFWklzX2lPLdW
7XaCyvRx4L8ws6moxoKhxqBaPT3XgfAiskeNuknwYrH8QKH8n+F+Ljj0veHD6fn3
XIz1/8sI84e1/lFynk0T
=/s7B
-----END PGP SIGNATURE-----

Merge tag 'v3.10.30' into linux-linaro-lsk

This is the 3.10.30 stable release

commit 8415e60445
87 changed files with 954 additions and 465 deletions
@@ -1372,8 +1372,8 @@ may allocate from based on an estimation of its current memory and swap use.
 For example, if a task is using all allowed memory, its badness score will be
 1000.  If it is using half of its allowed memory, its score will be 500.
 
-There is an additional factor included in the badness score: root
-processes are given 3% extra memory over other tasks.
+There is an additional factor included in the badness score: the current memory
+and swap usage is discounted by 3% for root processes.
 
 The amount of "allowed" memory depends on the context in which the oom killer
 was called.  If it is due to the memory assigned to the allocating task's cpuset
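A worked example of the revised rule (illustrative only, assuming the score is computed in points out of 1000 as the text above describes): a root task using all of its allowed memory is scored 1000 - (1000 * 3) / 100 = 970, because the 3% is now subtracted from the points derived from current usage; under the old wording the 3% was instead added to the allowance before the score was derived.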
@@ -24,6 +24,7 @@ Supported adapters:
   * Intel Lynx Point-LP (PCH)
   * Intel Avoton (SOC)
   * Intel Wellsburg (PCH)
+  * Intel Coleto Creek (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
Makefile (2 changes)

@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 29
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
@@ -13,6 +13,7 @@
 #include <linux/kdebug.h>
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/sched.h>
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
@@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
 			 u32 dummy, u32 low, u32 high);
 long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
 			 u32 dummy, u32 low, u32 high);
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
 long compat_sys_sync_file_range2(int fd, unsigned int flags,
 				 u32 offset_lo, u32 offset_hi,
 				 u32 nbytes_lo, u32 nbytes_hi);
@@ -195,7 +195,7 @@ void platform_calibrate_ccount(void)
  * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
  */
 
-static struct resource ethoc_res[] __initdata = {
+static struct resource ethoc_res[] = {
 	[0] = { /* register space */
 		.start = OETH_REGS_PADDR,
 		.end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
@@ -213,7 +213,7 @@ static struct resource ethoc_res[] __initdata = {
 	},
 };
 
-static struct ethoc_platform_data ethoc_pdata __initdata = {
+static struct ethoc_platform_data ethoc_pdata = {
 	/*
 	 * The MAC address for these boards is 00:50:c2:13:6f:xx.
 	 * The last byte (here as zero) is read from the DIP switches on the
@@ -223,7 +223,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = {
 	.phy_id = -1,
 };
 
-static struct platform_device ethoc_device __initdata = {
+static struct platform_device ethoc_device = {
 	.name = "ethoc",
 	.id = -1,
 	.num_resources = ARRAY_SIZE(ethoc_res),
@@ -237,13 +237,13 @@ static struct platform_device ethoc_device __initdata = {
  * UART
  */
 
-static struct resource serial_resource __initdata = {
+static struct resource serial_resource = {
 	.start = DUART16552_PADDR,
 	.end = DUART16552_PADDR + 0x1f,
 	.flags = IORESOURCE_MEM,
 };
 
-static struct plat_serial8250_port serial_platform_data[] __initdata = {
+static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase = DUART16552_PADDR,
 		.irq = DUART16552_INTNUM,
@@ -256,7 +256,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = {
 	{ },
 };
 
-static struct platform_device xtavnet_uart __initdata = {
+static struct platform_device xtavnet_uart = {
 	.name = "serial8250",
 	.id = PLAT8250_DEV_PLATFORM,
 	.dev = {
@@ -33,6 +33,7 @@
 #include <linux/proc_fs.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
+#include <linux/regulator/machine.h>
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
 #endif
@@ -705,6 +706,14 @@ void __init acpi_early_init(void)
 		goto error0;
 	}
 
+	/*
+	 * If the system is using ACPI then we can be reasonably
+	 * confident that any regulators are managed by the firmware
+	 * so tell the regulator core it has everything it needs to
+	 * know.
+	 */
+	regulator_has_full_constraints();
+
 	return;
 
 error0:
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-	int core_pct_busy;
+	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
 	int freq;
@@ -68,7 +68,7 @@ struct _pid {
 	int32_t i_gain;
 	int32_t d_gain;
 	int deadband;
-	int last_err;
+	int32_t last_err;
 };
 
 struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
 	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
-	signed int err, result;
+	signed int result;
 	int32_t pterm, dterm, fp_error;
 	int32_t integral_limit;
 
-	err = pid->setpoint - busy;
-	fp_error = int_tofp(err);
+	fp_error = int_tofp(pid->setpoint) - busy;
 
-	if (abs(err) <= pid->deadband)
+	if (abs(fp_error) <= int_tofp(pid->deadband))
 		return 0;
 
 	pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
 	if (pid->integral < -integral_limit)
 		pid->integral = -integral_limit;
 
-	dterm = mul_fp(pid->d_gain, (err - pid->last_err));
-	pid->last_err = err;
+	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+	pid->last_err = fp_error;
 
 	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
 
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
 	int max_perf = cpu->pstate.turbo_pstate;
+	int max_perf_adj;
 	int min_perf;
 	if (limits.no_turbo)
 		max_perf = cpu->pstate.max_pstate;
 
-	max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-	*max = clamp_t(int, max_perf,
+	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
 	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -394,7 +394,10 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 	trace_cpu_frequency(pstate * 100000, cpu->cpu);
 
 	cpu->pstate.current_pstate = pstate;
-	wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+	if (limits.no_turbo)
+		wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
+	else
+		wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
 
 }
 
@@ -432,8 +435,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
 	u64 core_pct;
-	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-	sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+	core_pct = div64_u64(int_tofp(sample->aperf * 100),
+			     sample->mperf);
+	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
 
 	sample->core_pct_busy = core_pct;
 }
@@ -465,22 +469,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-	int32_t busy_scaled;
 	int32_t core_busy, max_pstate, current_pstate;
 
-	core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
-	busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
-	return fp_toint(busy_scaled);
+	return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
-	int busy_scaled;
+	int32_t busy_scaled;
 	struct _pid *pid;
 	signed int ctl = 0;
 	int steps;
@@ -523,6 +524,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, default_policy),
 	ICPU(0x2d, default_policy),
 	ICPU(0x3a, default_policy),
+	ICPU(0x3c, default_policy),
+	ICPU(0x3e, default_policy),
+	ICPU(0x3f, default_policy),
+	ICPU(0x45, default_policy),
+	ICPU(0x46, default_policy),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
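The intel_pstate hunks above convert the PID error path and the core-busy bookkeeping from plain int to int32_t fixed point. A minimal sketch of the fixed-point helpers this code relies on — the 8-bit fraction width is an assumption based on the driver's FRAC_BITS of that era, as the helpers themselves are not part of this diff:

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)	/* integer -> fixed point */
#define fp_toint(X) ((X) >> FRAC_BITS)		/* fixed point -> integer */

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	/* widen, multiply, then drop the extra fraction bits */
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

With these, the new fp_error = int_tofp(pid->setpoint) - busy keeps the fractional part of the busy measurement that the old integer err = pid->setpoint - busy discarded.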
@@ -275,11 +275,13 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
 		}
 
 		if (slot) {
+			edev->res[i].name  = NULL;
 			edev->res[i].start = SLOT_ADDRESS(root, slot)
 					     + (i * 0x400);
 			edev->res[i].end   = edev->res[i].start + 0xff;
 			edev->res[i].flags = IORESOURCE_IO;
 		} else {
+			edev->res[i].name  = NULL;
 			edev->res[i].start = SLOT_ADDRESS(root, slot)
 					     + EISA_VENDOR_ID_OFFSET;
 			edev->res[i].end   = edev->res[i].start + 3;
@@ -326,13 +328,6 @@ static int __init eisa_probe(struct eisa_root_device *root)
 		return -ENOMEM;
 	}
 
-	if (eisa_init_device(root, edev, 0)) {
-		kfree(edev);
-		if (!root->force_probe)
-			return -ENODEV;
-		goto force_probe;
-	}
-
 	if (eisa_request_resources(root, edev, 0)) {
 		dev_warn(root->dev,
 			 "EISA: Cannot allocate resource for mainboard\n");
@@ -342,6 +337,14 @@ static int __init eisa_probe(struct eisa_root_device *root)
 		goto force_probe;
 	}
 
+	if (eisa_init_device(root, edev, 0)) {
+		eisa_release_resources(edev);
+		kfree(edev);
+		if (!root->force_probe)
+			return -ENODEV;
+		goto force_probe;
+	}
+
 	dev_info(&edev->dev, "EISA: Mainboard %s detected\n", edev->id.sig);
 
 	if (eisa_register_device(edev)) {
@@ -361,11 +364,6 @@ static int __init eisa_probe(struct eisa_root_device *root)
 			continue;
 		}
 
-		if (eisa_init_device(root, edev, i)) {
-			kfree(edev);
-			continue;
-		}
-
 		if (eisa_request_resources(root, edev, i)) {
 			dev_warn(root->dev,
 				 "Cannot allocate resource for EISA slot %d\n",
@@ -374,6 +372,12 @@ static int __init eisa_probe(struct eisa_root_device *root)
 			continue;
 		}
 
+		if (eisa_init_device(root, edev, i)) {
+			eisa_release_resources(edev);
+			kfree(edev);
+			continue;
+		}
+
 		if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
 			enabled_str = " (forced enabled)";
 		else if (edev->state == EISA_CONFIG_FORCED)
@@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
 		sr07 |= 0x11;
 		break;
 	case 16:
-		sr07 |= 0xc1;
-		hdr = 0xc0;
+		sr07 |= 0x17;
+		hdr = 0xc1;
 		break;
 	case 24:
 		sr07 |= 0x15;
@@ -1687,6 +1687,7 @@ out_gem_unload:
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
+	pm_qos_remove_request(&dev_priv->pm_qos);
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
 	if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -222,7 +222,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	}
 
 	sg = st->sgl;
-	sg->offset = offset;
+	sg->offset = 0;
 	sg->length = size;
 
 	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
@@ -1682,9 +1682,13 @@
  * Please check the detailed lore in the commit message for for experimental
  * evidence.
  */
-#define PORTD_HOTPLUG_LIVE_STATUS               (1 << 29)
-#define PORTC_HOTPLUG_LIVE_STATUS               (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS               (1 << 27)
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X           (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X           (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X           (1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_VLV           (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_VLV           (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_VLV           (1 << 29)
 #define PORTD_HOTPLUG_INT_STATUS                (3 << 21)
 #define PORTC_HOTPLUG_INT_STATUS                (3 << 19)
 #define PORTB_HOTPLUG_INT_STATUS                (3 << 17)
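The register values above show the motivation: on Valleyview the port B and port D live-status bits trade places relative to G4X (bit 29 versus bit 27), so the single set of PORT?_HOTPLUG_LIVE_STATUS macros is split into _G4X and _VLV variants, and g4x_dp_detect() in the next hunk gains an IS_VALLEYVIEW() branch to pick the right one.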
@@ -2277,18 +2277,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 		return status;
 	}
 
-	switch (intel_dig_port->port) {
-	case PORT_B:
-		bit = PORTB_HOTPLUG_LIVE_STATUS;
-		break;
-	case PORT_C:
-		bit = PORTC_HOTPLUG_LIVE_STATUS;
-		break;
-	case PORT_D:
-		bit = PORTD_HOTPLUG_LIVE_STATUS;
-		break;
-	default:
-		return connector_status_unknown;
+	if (IS_VALLEYVIEW(dev)) {
+		switch (intel_dig_port->port) {
+		case PORT_B:
+			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		case PORT_C:
+			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		case PORT_D:
+			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		default:
+			return connector_status_unknown;
+		}
+	} else {
+		switch (intel_dig_port->port) {
+		case PORT_B:
+			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		case PORT_C:
+			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		case PORT_D:
+			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		default:
+			return connector_status_unknown;
+		}
 	}
 
 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
@@ -1459,8 +1459,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
 }
 
-static int __intel_ring_begin(struct intel_ring_buffer *ring,
-			      int bytes)
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+				int bytes)
 {
 	int ret;
 
@@ -1476,7 +1476,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
 		return ret;
 	}
 
-	ring->space -= bytes;
 	return 0;
 }
 
@@ -1491,12 +1490,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
+	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
 	/* Preallocate the olr before touching the ring */
 	ret = intel_ring_alloc_seqno(ring);
 	if (ret)
 		return ret;
 
-	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+	ring->space -= num_dwords * sizeof(uint32_t);
+	return 0;
 }
 
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
@@ -1477,11 +1477,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
 		    > (32700 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_EH &&
+	} else if (mdev->type == G200_EH &&
 		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
 		    > (37500 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_ER &&
+	} else if (mdev->type == G200_ER &&
 		   (mga_vga_calculate_mode_bandwidth(mode,
 		    bpp) > (55000 * 1024))) {
 		return MODE_BANDWIDTH;
@@ -788,25 +788,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_mem *node = old_mem->mm_node;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
 	u64 src_offset = node->vma[0].offset;
 	u64 dst_offset = node->vma[1].offset;
+	int src_tiled = !!node->memtype;
+	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
 	int ret;
 
 	while (length) {
 		u32 amount, stride, height;
 
+		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+		if (ret)
+			return ret;
+
 		amount = min(length, (u64)(4 * 1024 * 1024));
 		stride = 16 * 4;
 		height = amount / stride;
 
-		if (old_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
+		if (src_tiled) {
 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
@@ -816,19 +816,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
 		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 			OUT_RING  (chan, 1);
 		}
-		if (new_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
+		if (dst_tiled) {
 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
@@ -838,18 +829,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
 		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 			OUT_RING  (chan, 1);
 		}
 
-		ret = RING_SPACE(chan, 14);
-		if (ret)
-			return ret;
-
 		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING  (chan, upper_32_bits(src_offset));
 		OUT_RING  (chan, upper_32_bits(dst_offset));
@@ -938,11 +938,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
 				radeon_atombios_get_ppll_ss_info(rdev,
 								 &radeon_crtc->ss,
 								 ATOM_DP_SS_ID1);
-		} else
+		} else {
 			radeon_crtc->ss_enabled =
 				radeon_atombios_get_ppll_ss_info(rdev,
 								 &radeon_crtc->ss,
 								 ATOM_DP_SS_ID1);
+		}
+		/* disable spread spectrum on DCE3 DP */
+		radeon_crtc->ss_enabled = false;
 	}
 	break;
 case ATOM_ENCODER_MODE_LVDS:
@@ -3792,8 +3792,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
 	}
 
-	/* only one DAC on DCE6 */
-	if (!ASIC_IS_DCE6(rdev))
+	/* only one DAC on DCE5 */
+	if (!ASIC_IS_DCE5(rdev))
 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 
@@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
 	if (track->cb_dirty) {
 		tmp = track->cb_target_mask;
 		for (i = 0; i < 8; i++) {
-			if ((tmp >> (i * 4)) & 0xF) {
+			u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+			if (format != V_028C70_COLOR_INVALID &&
+			    (tmp >> (i * 4)) & 0xF) {
 				/* at least one component is enabled */
 				if (track->cb_color_bo[i] == NULL) {
 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
@@ -1178,13 +1178,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
 
 	/* flush read cache over gart for this vmid */
-	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
 	radeon_ring_write(ring, 0xFFFFFFFF);
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 10); /* poll interval */
@@ -1200,6 +1199,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
 
 	/* set to DX10/11 mode */
 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1224,14 +1225,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 			  (ib->vm ? (ib->vm->id << 24) : 0));
 
 	/* flush read cache over gart for this vmid */
-	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
 	radeon_ring_write(ring, 0xFFFFFFFF);
 	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, 10); /* poll interval */
+	radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
 }
 
 void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
@@ -573,6 +573,7 @@
 #              define PACKET3_DB_ACTION_ENA        (1 << 26)
 #              define PACKET3_SH_ACTION_ENA        (1 << 27)
 #              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#              define PACKET3_ENGINE_ME            (1 << 31)
 #define	PACKET3_ME_INITIALIZE				0x44
 #define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define	PACKET3_COND_WRITE				0x45
@@ -2957,14 +2957,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	if (rdev->family >= CHIP_RV770)
+		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
 
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-				  PACKET3_VC_ACTION_ENA |
-				  PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
 		radeon_ring_write(ring, 10); /* poll interval */
@@ -2978,9 +2981,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	} else {
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-				  PACKET3_VC_ACTION_ENA |
-				  PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
 		radeon_ring_write(ring, 10); /* poll interval */
@@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 	}
 
 	for (i = 0; i < 8; i++) {
-		if ((tmp >> (i * 4)) & 0xF) {
+		u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+		if (format != V_0280A0_COLOR_INVALID &&
+		    (tmp >> (i * 4)) & 0xF) {
 			/* at least one component is enabled */
 			if (track->cb_color_bo[i] == NULL) {
 				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
@@ -1283,6 +1283,7 @@
 #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
 #              define PACKET3_VC_ACTION_ENA        (1 << 24)
 #              define PACKET3_CB_ACTION_ENA        (1 << 25)
@@ -2926,6 +2926,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 	/* tell the bios not to handle mode switching */
 	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
+	/* clear the vbios dpms state */
+	if (ASIC_IS_DCE4(rdev))
+		bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
 	if (rdev->family >= CHIP_R600) {
 		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
 		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
 /* Add the default buses */
 void radeon_i2c_init(struct radeon_device *rdev)
 {
+	if (radeon_hw_i2c)
+		DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
 	if (rdev->is_atom_bios)
 		radeon_atombios_i2c_init(rdev);
 	else
@@ -561,8 +561,10 @@ void radeon_pm_resume(struct radeon_device *rdev)
 	rdev->pm.current_clock_mode_index = 0;
 	rdev->pm.current_sclk = rdev->pm.default_sclk;
 	rdev->pm.current_mclk = rdev->pm.default_mclk;
-	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
-	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	if (rdev->pm.power_state) {
+		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	}
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM
 	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
 		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -4519,7 +4519,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
 	}
 
 	if (!ASIC_IS_NODCE(rdev)) {
-		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
 
 		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 		WREG32(DC_HPD1_INT_CONTROL, tmp);
@@ -394,7 +394,7 @@
 #       define GRPH_PFLIP_INT_MASK            (1 << 0)
 #       define GRPH_PFLIP_INT_TYPE            (1 << 8)
 
-#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
+#define	DAC_AUTODETECT_INT_CONTROL			0x67c8
 
 #define DC_HPD1_INT_STATUS                              0x601c
 #define DC_HPD2_INT_STATUS                              0x6028
@@ -108,6 +108,7 @@ config I2C_I801
 	    Lynx Point-LP (PCH)
 	    Avoton (SOC)
 	    Wellsburg (PCH)
+	    Coleto Creek (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
@@ -58,6 +58,7 @@
   Wellsburg (PCH) MS    0x8d7d     32     hard     yes     yes     yes
   Wellsburg (PCH) MS    0x8d7e     32     hard     yes     yes     yes
   Wellsburg (PCH) MS    0x8d7f     32     hard     yes     yes     yes
+  Coleto Creek (PCH)    0x23b0     32     hard     yes     yes     yes
 
   Features supported by this driver:
   Software PEC                     no
@@ -169,6 +170,7 @@
 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS	0x1e22
 #define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS	0x1f3c
 #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS	0x2330
+#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS	0x23b0
 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS	0x3b30
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS	0x8c22
 #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS	0x8d22
@@ -817,6 +819,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
 	{ 0, }
 };
 
@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
 	struct qib_sge *sge;
 	struct ib_wc wc;
 	u32 length;
+	enum ib_qp_type sqptype, dqptype;
 
 	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
 	if (!qp) {
 		ibp->n_pkt_drops++;
 		return;
 	}
-	if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+
+	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+			IB_QPT_UD : sqp->ibqp.qp_type;
+	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+			IB_QPT_UD : qp->ibqp.qp_type;
+
+	if (dqptype != sqptype ||
 	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
 		ibp->n_pkt_drops++;
 		goto drop;
@@ -917,7 +917,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
 
 		/* If range covers entire pagetable, free it */
 		if (!(start_pfn > level_pfn ||
-		      last_pfn < level_pfn + level_size(level))) {
+		      last_pfn < level_pfn + level_size(level) - 1)) {
 			dma_clear_pte(pte);
 			domain_flush_cache(domain, pte, sizeof(*pte));
 			free_pgtable_page(level_pte);
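The intel-iommu change is an off-by-one fix: level_pfn + level_size(level) is the first pfn past this page table, so the old comparison demanded last_pfn reach one beyond the table before it would be freed. Illustration with assumed numbers (level_pfn = 0, level_size(level) = 512): unmapping pfns 0-511 gives last_pfn = 511; the old test needed last_pfn >= 512 and never fired, while the corrected test needs last_pfn >= 511 and frees the fully covered table.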
@@ -176,8 +176,12 @@ config MD_FAULTY
 
 source "drivers/md/bcache/Kconfig"
 
+config BLK_DEV_DM_BUILTIN
+	boolean
+
 config BLK_DEV_DM
 	tristate "Device mapper support"
+	select BLK_DEV_DM_BUILTIN
 	---help---
 	  Device-mapper is a low level volume manager.  It works by allowing
 	  people to specify mappings for ranges of logical sectors. Various
@@ -32,6 +32,7 @@ obj-$(CONFIG_MD_FAULTY)		+= faulty.o
 obj-$(CONFIG_BCACHE)		+= bcache/
 obj-$(CONFIG_BLK_DEV_MD)	+= md-mod.o
 obj-$(CONFIG_BLK_DEV_DM)	+= dm-mod.o
+obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
 obj-$(CONFIG_DM_BUFIO)		+= dm-bufio.o
 obj-$(CONFIG_DM_BIO_PRISON)	+= dm-bio-prison.o
 obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
drivers/md/dm-builtin.c (new file, 48 lines)

@@ -0,0 +1,48 @@
+#include "dm.h"
+
+/*
+ * The kobject release method must not be placed in the module itself,
+ * otherwise we are subject to module unload races.
+ *
+ * The release method is called when the last reference to the kobject is
+ * dropped. It may be called by any other kernel code that drops the last
+ * reference.
+ *
+ * The release method suffers from module unload race. We may prevent the
+ * module from being unloaded at the start of the release method (using
+ * increased module reference count or synchronizing against the release
+ * method), however there is no way to prevent the module from being
+ * unloaded at the end of the release method.
+ *
+ * If this code were placed in the dm module, the following race may
+ * happen:
+ *  1. Some other process takes a reference to dm kobject
+ *  2. The user issues ioctl function to unload the dm device
+ *  3. dm_sysfs_exit calls kobject_put, however the object is not released
+ *     because of the other reference taken at step 1
+ *  4. dm_sysfs_exit waits on the completion
+ *  5. The other process that took the reference in step 1 drops it,
+ *     dm_kobject_release is called from this process
+ *  6. dm_kobject_release calls complete()
+ *  7. a reschedule happens before dm_kobject_release returns
+ *  8. dm_sysfs_exit continues, the dm device is unloaded, module reference
+ *     count is decremented
+ *  9. The user unloads the dm module
+ * 10. The other process that was rescheduled in step 7 continues to run,
+ *     it is now executing code in unloaded module, so it crashes
+ *
+ * Note that if the process that takes the foreign reference to dm kobject
+ * has a low priority and the system is sufficiently loaded with
+ * higher-priority processes that prevent the low-priority process from
+ * being scheduled long enough, this bug may really happen.
+ *
+ * In order to fix this module unload race, we place the release method
+ * into a helper code that is compiled directly into the kernel.
+ */
+
+void dm_kobject_release(struct kobject *kobj)
+{
+	complete(dm_get_completion_from_kobject(kobj));
+}
+
+EXPORT_SYMBOL(dm_kobject_release);
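The helper pairs with the dm-sysfs and dm.h hunks that follow: dm_kobject_release() lives in code built into the kernel proper, and all it does is complete() the completion embedded alongside the kobject, which dm_sysfs_exit() (below) waits on after dropping its reference - so the device-mapper module can no longer be unloaded while a foreign reference holder is still inside the release path.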
@@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
 static struct kobj_type dm_ktype = {
 	.sysfs_ops	= &dm_sysfs_ops,
 	.default_attrs	= dm_attrs,
+	.release	= dm_kobject_release,
 };
 
 /*
@@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)
  */
 void dm_sysfs_exit(struct mapped_device *md)
 {
-	kobject_put(dm_kobject(md));
+	struct kobject *kobj = dm_kobject(md);
+	kobject_put(kobj);
+	wait_for_completion(dm_get_completion_from_kobject(kobj));
 }
@@ -1349,6 +1349,12 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
 	return td->id;
 }
 
+/*
+ * Check whether @time (of block creation) is older than @td's last snapshot.
+ * If so then the associated block is shared with the last snapshot device.
+ * Any block on a device created *after* the device last got snapshotted is
+ * necessarily not shared.
+ */
 static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
 {
 	return td->snapshotted_time > time;
@@ -1458,6 +1464,20 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
 	return r;
 }
 
+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+{
+	int r;
+	uint32_t ref_count;
+
+	down_read(&pmd->root_lock);
+	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+	if (!r)
+		*result = (ref_count != 0);
+	up_read(&pmd->root_lock);
+
+	return r;
+}
+
 bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
 {
 	int r;
@@ -181,6 +181,8 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
 
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 
+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+
 /*
  * Returns -ENOSPC if the new size is too small and already allocated
  * blocks would be lost.
@@ -512,6 +512,7 @@ struct dm_thin_new_mapping {
 	unsigned quiesced:1;
 	unsigned prepared:1;
 	unsigned pass_discard:1;
+	unsigned definitely_not_shared:1;
 
 	struct thin_c *tc;
 	dm_block_t virt_block;
@@ -683,7 +684,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 	cell_defer_no_holder(tc, m->cell2);
 
 	if (m->pass_discard)
-		remap_and_issue(tc, m->bio, m->data_block);
+		if (m->definitely_not_shared)
+			remap_and_issue(tc, m->bio, m->data_block);
+		else {
+			bool used = false;
+			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
+				bio_endio(m->bio, 0);
+			else
+				remap_and_issue(tc, m->bio, m->data_block);
+		}
 	else
 		bio_endio(m->bio, 0);
 
@@ -751,13 +760,17 @@ static int ensure_next_mapping(struct pool *pool)
 
 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct dm_thin_new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *m = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
+	memset(m, 0, sizeof(struct dm_thin_new_mapping));
+	INIT_LIST_HEAD(&m->list);
+	m->bio = NULL;
+
 	pool->next_mapping = NULL;
 
-	return r;
+	return m;
 }
 
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -769,15 +782,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
-	m->quiesced = 0;
-	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_dest;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
 		m->quiesced = 1;
@@ -840,15 +848,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
-	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	/*
 	 * If the whole block of data is being overwritten or we are not
@@ -1032,12 +1037,12 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			 */
 			m = get_next_mapping(pool);
 			m->tc = tc;
-			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
+			m->pass_discard = pool->pf.discard_passdown;
+			m->definitely_not_shared = !lookup_result.shared;
 			m->virt_block = block;
 			m->data_block = lookup_result.block;
 			m->cell = cell;
 			m->cell2 = cell2;
-			m->err = 0;
 			m->bio = bio;
 
 			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
@@ -184,8 +184,8 @@ struct mapped_device {
 	/* forced geometry settings */
 	struct hd_geometry geometry;
 
-	/* sysfs handle */
-	struct kobject kobj;
+	/* kobject and completion */
+	struct dm_kobject_holder kobj_holder;
 
 	/* zero-length flush that will be cloned and submitted to targets */
 	struct bio flush_bio;
@@ -1904,6 +1904,7 @@ static struct mapped_device *alloc_dev(int minor)
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
+	init_completion(&md->kobj_holder.completion);
 
 	md->disk->major = _major;
 	md->disk->first_minor = minor;
@@ -2735,20 +2736,14 @@ struct gendisk *dm_disk(struct mapped_device *md)
 
 struct kobject *dm_kobject(struct mapped_device *md)
 {
-	return &md->kobj;
+	return &md->kobj_holder.kobj;
 }
 
-/*
- * struct mapped_device should not be exported outside of dm.c
- * so use this check to verify that kobj is part of md structure
- */
 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
 {
 	struct mapped_device *md;
 
-	md = container_of(kobj, struct mapped_device, kobj);
-	if (&md->kobj != kobj)
-		return NULL;
+	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
 
 	if (test_bit(DMF_FREEING, &md->flags) ||
 	    dm_deleting_md(md))
|
|
|||
|
|
@ -15,6 +15,8 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/kobject.h>
|
||||
|
||||
/*
|
||||
* Suspend feature flags
|
||||
|
|
@ -125,11 +127,26 @@ void dm_interface_exit(void);
|
|||
/*
|
||||
* sysfs interface
|
||||
*/
|
||||
struct dm_kobject_holder {
|
||||
struct kobject kobj;
|
||||
struct completion completion;
|
||||
};
|
||||
|
||||
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
|
||||
{
|
||||
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
|
||||
}
|
||||
|
||||
int dm_sysfs_init(struct mapped_device *md);
|
||||
void dm_sysfs_exit(struct mapped_device *md);
|
||||
struct kobject *dm_kobject(struct mapped_device *md);
|
||||
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
|
||||
|
||||
/*
|
||||
* The kobject helper
|
||||
*/
|
||||
void dm_kobject_release(struct kobject *kobj);
|
||||
|
||||
/*
|
||||
* Targets for linear and striped mappings
|
||||
*/
|
||||
|
|
|
|||
|
|
@@ -245,6 +245,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
 		return -EINVAL;
 	}
 
+	/*
+	 * We need to set this before the dm_tm_new_block() call below.
+	 */
+	ll->nr_blocks = nr_blocks;
 	for (i = old_blocks; i < blocks; i++) {
 		struct dm_block *b;
 		struct disk_index_entry idx;
@@ -252,6 +256,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
 		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
 		if (r < 0)
 			return r;
+
 		idx.blocknr = cpu_to_le64(dm_block_location(b));
 
 		r = dm_tm_unlock(ll->tm, b);
@@ -266,7 +271,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
 			return r;
 	}
 
-	ll->nr_blocks = nr_blocks;
 	return 0;
 }
 
@@ -608,20 +608,38 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
 	 * Flick into a mode where all blocks get allocated in the new area.
 	 */
 	smm->begin = old_len;
-	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+	memcpy(sm, &bootstrap_ops, sizeof(*sm));
 
 	/*
 	 * Extend.
 	 */
 	r = sm_ll_extend(&smm->ll, extra_blocks);
+	if (r)
+		goto out;
 
+	/*
+	 * We repeatedly increment then commit until the commit doesn't
+	 * allocate any new blocks.
+	 */
+	do {
+		for (i = old_len; !r && i < smm->begin; i++) {
+			r = sm_ll_inc(&smm->ll, i, &ev);
+			if (r)
+				goto out;
+		}
+		old_len = smm->begin;
+
+		r = sm_ll_commit(&smm->ll);
+		if (r)
+			goto out;
+
+	} while (old_len != smm->begin);
+
+out:
 	/*
 	 * Switch back to normal behaviour.
 	 */
-	memcpy(&smm->sm, &ops, sizeof(smm->sm));
-	for (i = old_len; !r && i < smm->begin; i++)
-		r = sm_ll_inc(&smm->ll, i, &ev);
-
+	memcpy(sm, &ops, sizeof(*sm));
 	return r;
 }
 
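The do/while shape of the extend loop is the essence of this fix: sm_ll_inc() and sm_ll_commit() can themselves allocate metadata blocks, which advances smm->begin again, so a single pass could leave freshly allocated blocks without a reference count. Iterating until a commit allocates nothing reaches a fixpoint where every block brought into use during the extend has been counted.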
@@ -157,15 +157,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
 	return ret;
 }
 
-static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
 {
 	u16 ret;
 
-	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
-		return 0;
-	}
-
 	state->i2c_write_buffer[0] = reg >> 8;
 	state->i2c_write_buffer[1] = reg & 0xff;
 
@@ -183,6 +178,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
 		dprintk("i2c read error on %d", reg);
 
 	ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+
+	return ret;
+}
+
+static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+{
+	u16 ret;
+
+	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+		dprintk("could not acquire lock");
+		return 0;
+	}
+
+	ret = __dib8000_read_word(state, reg);
 
 	mutex_unlock(&state->i2c_buffer_lock);
 
 	return ret;
@@ -192,8 +202,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
 {
 	u16 rw[2];
 
-	rw[0] = dib8000_read_word(state, reg + 0);
-	rw[1] = dib8000_read_word(state, reg + 1);
+	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+		dprintk("could not acquire lock");
+		return 0;
+	}
+
+	rw[0] = __dib8000_read_word(state, reg + 0);
+	rw[1] = __dib8000_read_word(state, reg + 1);
+
+	mutex_unlock(&state->i2c_buffer_lock);
 
 	return ((rw[0] << 16) | (rw[1]));
 }
@@ -2445,7 +2462,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
 	if (state->revision == 0x8090)
 		internal = dib8000_read32(state, 23) / 1000;
 
-	if (state->autosearch_state == AS_SEARCHING_FFT) {
+	if ((state->revision >= 0x8002) &&
+	    (state->autosearch_state == AS_SEARCHING_FFT)) {
 		dib8000_write_word(state, 37,  0x0065); /* P_ctrl_pha_off_max default values */
 		dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */
 
@@ -2481,7 +2499,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
 		dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */
 		dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */
 		dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */
-	} else if (state->autosearch_state == AS_SEARCHING_GUARD) {
+	} else if ((state->revision >= 0x8002) &&
+		   (state->autosearch_state == AS_SEARCHING_GUARD)) {
 		c->transmission_mode = TRANSMISSION_MODE_8K;
 		c->guard_interval = GUARD_INTERVAL_1_8;
 		c->inversion = 0;
@@ -2583,7 +2602,8 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
 	struct dib8000_state *state = fe->demodulator_priv;
 	u16 irq_pending = dib8000_read_word(state, 1284);
 
-	if (state->autosearch_state == AS_SEARCHING_FFT) {
+	if ((state->revision >= 0x8002) &&
+	    (state->autosearch_state == AS_SEARCHING_FFT)) {
 		if (irq_pending & 0x1) {
 			dprintk("dib8000_autosearch_irq: max correlation result available");
 			return 3;
@@ -110,28 +110,94 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
 	return b1[0];
 }
 
+static u32 m88rs2000_get_mclk(struct dvb_frontend *fe)
+{
+	struct m88rs2000_state *state = fe->demodulator_priv;
+	u32 mclk;
+	u8 reg;
+	/* Must not be 0x00 or 0xff */
+	reg = m88rs2000_readreg(state, 0x86);
+	if (!reg || reg == 0xff)
+		return 0;
+
+	reg /= 2;
+	reg += 1;
+
+	mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
+
+	return mclk;
+}
+
+static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset)
+{
+	struct m88rs2000_state *state = fe->demodulator_priv;
+	u32 mclk;
+	s32 tmp;
+	u8 reg;
+	int ret;
+
+	mclk = m88rs2000_get_mclk(fe);
+	if (!mclk)
+		return -EINVAL;
+
+	tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk;
+	if (tmp < 0)
+		tmp += 4096;
+
+	/* Carrier Offset */
+	ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4));
+
+	reg = m88rs2000_readreg(state, 0x9d);
+	reg &= 0xf;
+	reg |= (u8)(tmp & 0xf) << 4;
+
+	ret |= m88rs2000_writereg(state, 0x9d, reg);
+
+	return ret;
+}
+
 static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
 {
 	struct m88rs2000_state *state = fe->demodulator_priv;
 	int ret;
-	u32 temp;
+	u64 temp;
+	u32 mclk;
 	u8 b[3];
 
 	if ((srate < 1000000) || (srate > 45000000))
 		return -EINVAL;
 
+	mclk = m88rs2000_get_mclk(fe);
+	if (!mclk)
+		return -EINVAL;
+
 	temp = srate / 1000;
 	temp *= 11831;
 	temp /= 68;
 	temp -= 3;
 	temp *= 1 << 24;
 
+	do_div(temp, mclk);
+
 	b[0] = (u8) (temp >> 16) & 0xff;
 	b[1] = (u8) (temp >> 8) & 0xff;
 	b[2] = (u8) temp & 0xff;
 
 	ret = m88rs2000_writereg(state, 0x93, b[2]);
 	ret |= m88rs2000_writereg(state, 0x94, b[1]);
 	ret |= m88rs2000_writereg(state, 0x95, b[0]);
 
+	if (srate > 10000000)
+		ret |= m88rs2000_writereg(state, 0xa0, 0x20);
+	else
+		ret |= m88rs2000_writereg(state, 0xa0, 0x60);
+
+	ret |= m88rs2000_writereg(state, 0xa1, 0xe0);
+
+	if (srate > 12000000)
+		ret |= m88rs2000_writereg(state, 0xa3, 0x20);
+	else if (srate > 2800000)
+		ret |= m88rs2000_writereg(state, 0xa3, 0x98);
+	else
+		ret |= m88rs2000_writereg(state, 0xa3, 0x90);
+
 	deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
 	return ret;
 }
@@ -260,8 +326,6 @@ struct inittab m88rs2000_shutdown[] = {
 };
 
 struct inittab fe_reset[] = {
-	{DEMOD_WRITE, 0x00, 0x01},
-	{DEMOD_WRITE, 0xf1, 0xbf},
 	{DEMOD_WRITE, 0x00, 0x01},
 	{DEMOD_WRITE, 0x20, 0x81},
 	{DEMOD_WRITE, 0x21, 0x80},
@@ -305,9 +369,6 @@ struct inittab fe_trigger[] = {
 	{DEMOD_WRITE, 0x9b, 0x64},
 	{DEMOD_WRITE, 0x9e, 0x00},
 	{DEMOD_WRITE, 0x9f, 0xf8},
-	{DEMOD_WRITE, 0xa0, 0x20},
-	{DEMOD_WRITE, 0xa1, 0xe0},
-	{DEMOD_WRITE, 0xa3, 0x38},
 	{DEMOD_WRITE, 0x98, 0xff},
 	{DEMOD_WRITE, 0xc0, 0x0f},
 	{DEMOD_WRITE, 0x89, 0x01},
@@ -540,9 +601,8 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	fe_status_t status;
 	int i, ret = 0;
-	s32 tmp;
 	u32 tuner_freq;
-	u16 offset = 0;
+	s16 offset = 0;
 	u8 reg;
 
 	state->no_lock_count = 0;
@@ -567,29 +627,26 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
 	if (ret < 0)
 		return -ENODEV;
 
-	offset = tuner_freq - c->frequency;
-
-	/* calculate offset assuming 96000kHz*/
-	tmp = offset;
-	tmp *= 65536;
+	offset = (s16)((s32)tuner_freq - c->frequency);
 
-	tmp = (2 * tmp + 96000) / (2 * 96000);
-	if (tmp < 0)
-		tmp += 65536;
+	/* default mclk value 96.4285 * 2 * 1000 = 192857 */
+	if (((c->frequency % 192857) >= (192857 - 3000)) ||
+	    (c->frequency % 192857) <= 3000)
+		ret = m88rs2000_writereg(state, 0x86, 0xc2);
+	else
+		ret = m88rs2000_writereg(state, 0x86, 0xc6);
 
-	offset = tmp & 0xffff;
+	ret |= m88rs2000_set_carrieroffset(fe, offset);
+	if (ret < 0)
+		return -ENODEV;
 
-	ret = m88rs2000_writereg(state, 0x9a, 0x30);
-	/* Unknown usually 0xc6 sometimes 0xc1 */
-	reg = m88rs2000_readreg(state, 0x86);
-	ret |= m88rs2000_writereg(state, 0x86, reg);
-	/* Offset lower nibble always 0 */
-	ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
-	ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
-
+	/* Reset demod by symbol rate */
+	if (c->symbol_rate > 27500000)
+		ret = m88rs2000_writereg(state, 0xf1, 0xa4);
+	else
+		ret = m88rs2000_writereg(state, 0xf1, 0xbf);
 
 	/* Reset Demod */
-	ret = m88rs2000_tab_set(state, fe_reset);
+	ret |= m88rs2000_tab_set(state, fe_reset);
 	if (ret < 0)
 		return -ENODEV;
 
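A worked example of the new carrier-offset path (illustrative numbers only): with the 27000 kHz crystal constant added to m88rs2000.h below, register 0x86 = 0xc6 gives mclk = ((0xc6 / 2 + 1) * 27000 + 14) / 28 = 96429 kHz; a tuner error of offset = +500 kHz then yields tmp = (500 * 4096 + 96429 / 2) / 96429 = 21, whose upper bits go to register 0x9c (tmp >> 4) and whose low nibble (tmp & 0xf) lands in the top half of register 0x9d.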
@@ -53,6 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
 }
 #endif /* CONFIG_DVB_M88RS2000 */
 
+#define RS2000_FE_CRYSTAL_KHZ 27000
+
 enum {
 	DEMOD_WRITE = 0x1,
 	WRITE_DELAY = 0x10,
@@ -40,7 +40,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 /* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE  64
+#define MAX_XFER_SIZE  256
 
 #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
 #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
@ -177,21 +177,6 @@ unlock:
|
|||
mutex_unlock(&dev->mfc_mutex);
|
||||
}
|
||||
|
||||
static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
|
||||
{
|
||||
struct video_device *vdev = video_devdata(file);
|
||||
|
||||
if (!vdev) {
|
||||
mfc_err("failed to get video_device");
|
||||
return MFCNODE_INVALID;
|
||||
}
|
||||
if (vdev->index == 0)
|
||||
return MFCNODE_DECODER;
|
||||
else if (vdev->index == 1)
|
||||
return MFCNODE_ENCODER;
|
||||
return MFCNODE_INVALID;
|
||||
}
|
||||
|
||||
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
|
||||
{
|
||||
mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
|
||||
|
|
@ -701,6 +686,7 @@ irq_cleanup_hw:
|
|||
/* Open an MFC node */
|
||||
static int s5p_mfc_open(struct file *file)
|
||||
{
|
||||
struct video_device *vdev = video_devdata(file);
|
||||
struct s5p_mfc_dev *dev = video_drvdata(file);
|
||||
struct s5p_mfc_ctx *ctx = NULL;
|
||||
struct vb2_queue *q;
|
||||
|
|
@ -738,7 +724,7 @@ static int s5p_mfc_open(struct file *file)
|
|||
/* Mark context as idle */
|
||||
clear_work_bit_irqsave(ctx);
|
||||
dev->ctx[ctx->num] = ctx;
|
||||
if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
|
||||
if (vdev == dev->vfd_dec) {
|
||||
ctx->type = MFCINST_DECODER;
|
||||
ctx->c_ops = get_dec_codec_ops();
|
||||
s5p_mfc_dec_init(ctx);
|
||||
|
|
@ -748,7 +734,7 @@ static int s5p_mfc_open(struct file *file)
|
|||
mfc_err("Failed to setup mfc controls\n");
|
||||
goto err_ctrls_setup;
|
||||
}
|
||||
} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
|
||||
} else if (vdev == dev->vfd_enc) {
|
||||
ctx->type = MFCINST_ENCODER;
|
||||
ctx->c_ops = get_enc_codec_ops();
|
||||
/* only for encoder */
|
||||
|
|
@ -793,10 +779,10 @@ static int s5p_mfc_open(struct file *file)
|
|||
q = &ctx->vq_dst;
|
||||
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
|
||||
q->drv_priv = &ctx->fh;
|
||||
if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
|
||||
if (vdev == dev->vfd_dec) {
|
||||
q->io_modes = VB2_MMAP;
|
||||
q->ops = get_dec_queue_ops();
|
||||
} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
|
||||
} else if (vdev == dev->vfd_enc) {
|
||||
q->io_modes = VB2_MMAP | VB2_USERPTR;
|
||||
q->ops = get_enc_queue_ops();
|
||||
} else {
|
||||
|
|
@@ -815,10 +801,10 @@ static int s5p_mfc_open(struct file *file)
     q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
     q->io_modes = VB2_MMAP;
     q->drv_priv = &ctx->fh;
-    if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+    if (vdev == dev->vfd_dec) {
         q->io_modes = VB2_MMAP;
         q->ops = get_dec_queue_ops();
-    } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+    } else if (vdev == dev->vfd_enc) {
         q->io_modes = VB2_MMAP | VB2_USERPTR;
         q->ops = get_enc_queue_ops();
     } else {
@@ -112,15 +112,6 @@ enum s5p_mfc_fmt_type {
     MFC_FMT_RAW,
 };
 
-/**
- * enum s5p_mfc_node_type - The type of an MFC device node.
- */
-enum s5p_mfc_node_type {
-    MFCNODE_INVALID = -1,
-    MFCNODE_DECODER = 0,
-    MFCNODE_ENCODER = 1,
-};
-
 /**
  * enum s5p_mfc_inst_type - The type of an MFC instance.
  */
@@ -442,6 +442,7 @@ static struct cxd2820r_config anysee_cxd2820r_config = {
  * IOD[0] ZL10353 1=enabled
  * IOE[0] tuner 0=enabled
  * tuner is behind ZL10353 I2C-gate
+ * tuner is behind TDA10023 I2C-gate
  *
  * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
  * PCB: 508TC (rev0.6)
@@ -956,7 +957,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
 
         if (fe && adap->fe[1]) {
             /* attach tuner for 2nd FE */
-            fe = dvb_attach(dvb_pll_attach, adap->fe[0],
+            fe = dvb_attach(dvb_pll_attach, adap->fe[1],
                 (0xc0 >> 1), &d->i2c_adap,
                 DVB_PLL_SAMSUNG_DTOS403IH102A);
         }
@@ -51,6 +51,8 @@
  *  document number TBD : Lynx Point
  *  document number TBD : Lynx Point-LP
  *  document number TBD : Wellsburg
+ *  document number TBD : Avoton SoC
+ *  document number TBD : Coleto Creek
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -207,6 +209,8 @@ enum lpc_chipsets {
     LPC_LPT,        /* Lynx Point */
     LPC_LPT_LP,     /* Lynx Point-LP */
     LPC_WBG,        /* Wellsburg */
+    LPC_AVN,        /* Avoton SoC */
+    LPC_COLETO,     /* Coleto Creek */
 };
 
 struct lpc_ich_info lpc_chipset_info[] = {
@@ -491,6 +495,14 @@ struct lpc_ich_info lpc_chipset_info[] = {
         .name = "Wellsburg",
         .iTCO_version = 2,
     },
+    [LPC_AVN] = {
+        .name = "Avoton SoC",
+        .iTCO_version = 1,
+    },
+    [LPC_COLETO] = {
+        .name = "Coleto Creek",
+        .iTCO_version = 2,
+    },
 };
 
 /*
@@ -704,6 +716,11 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
     { PCI_VDEVICE(INTEL, 0x8d5d), LPC_WBG},
     { PCI_VDEVICE(INTEL, 0x8d5e), LPC_WBG},
     { PCI_VDEVICE(INTEL, 0x8d5f), LPC_WBG},
+    { PCI_VDEVICE(INTEL, 0x1f38), LPC_AVN},
+    { PCI_VDEVICE(INTEL, 0x1f39), LPC_AVN},
+    { PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
+    { PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
+    { PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
     { 0, },  /* End of list */
 };
 MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
@@ -1931,6 +1931,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
     struct mmc_card *card = md->queue.card;
     struct mmc_host *host = card->host;
     unsigned long flags;
+    unsigned int cmd_flags = req ? req->cmd_flags : 0;
 
     if (req && !mq->mqrq_prev->req)
         /* claim host only for the first request */
@@ -1946,7 +1947,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
     }
 
     mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-    if (req && req->cmd_flags & REQ_DISCARD) {
+    if (cmd_flags & REQ_DISCARD) {
         /* complete ongoing async transfer before issuing discard */
         if (card->host->areq)
             mmc_blk_issue_rw_rq(mq, NULL);
@@ -1955,7 +1956,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
             ret = mmc_blk_issue_secdiscard_rq(mq, req);
         else
             ret = mmc_blk_issue_discard_rq(mq, req);
-    } else if (req && req->cmd_flags & REQ_FLUSH) {
+    } else if (cmd_flags & REQ_FLUSH) {
         /* complete ongoing async transfer before issuing flush */
         if (card->host->areq)
             mmc_blk_issue_rw_rq(mq, NULL);
@@ -1971,7 +1972,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 out:
     if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-        (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
+        (cmd_flags & MMC_REQ_SPECIAL_MASK))
         /*
          * Release host when there are no more requests
          * and after special request(discard, flush) is done.
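The three hunks above replace late `req->cmd_flags` reads with a copy taken on entry: once the request has been completed inside the issue path, `req` may already be freed by the time the `out:` checks run. A toy, runnable illustration of the snapshot pattern (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct request { unsigned int cmd_flags; };

#define REQ_FLUSH (1u << 0)

/* process() completes and frees the request as a side effect */
static void process(struct request *req) { free(req); }

static void issue(struct request *req)
{
    /* snapshot the flags up front: req may be gone afterwards */
    unsigned int cmd_flags = req ? req->cmd_flags : 0;

    if (req)
        process(req);            /* req is freed in here */

    if (cmd_flags & REQ_FLUSH)   /* safe: no dereference of req */
        printf("flush completed\n");
}

int main(void)
{
    struct request *r = malloc(sizeof(*r));
    r->cmd_flags = REQ_FLUSH;
    issue(r);
    return 0;
}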
@@ -1188,11 +1188,22 @@ static void atmci_start_request(struct atmel_mci *host,
             iflags |= ATMCI_CMDRDY;
     cmd = mrq->cmd;
     cmdflags = atmci_prepare_command(slot->mmc, cmd);
-    atmci_send_command(host, cmd, cmdflags);
+
+    /*
+     * DMA transfer should be started before sending the command to avoid
+     * unexpected errors especially for read operations in SDIO mode.
+     * Unfortunately, in PDC mode, command has to be sent before starting
+     * the transfer.
+     */
+    if (host->submit_data != &atmci_submit_data_dma)
+        atmci_send_command(host, cmd, cmdflags);
 
     if (data)
         host->submit_data(host, data);
 
+    if (host->submit_data == &atmci_submit_data_dma)
+        atmci_send_command(host, cmd, cmdflags);
+
     if (mrq->stop) {
         host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
         host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
@@ -676,7 +676,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
         ecc_stat >>= 4;
     } while (--no_subpages);
 
-    mtd->ecc_stats.corrected += ret;
     pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
 
     return ret;
@@ -34,11 +34,11 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/platform_device.h>
-#include <linux/mod_devicetable.h>
 #include <linux/log2.h>
 #include <linux/pm.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/dmi.h>
 
 /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
 #include <asm-generic/rtc.h>
@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
     return 0;
 }
 
+/*
+ * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
+ */
+static bool alarm_disable_quirk;
+
+static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
+{
+    alarm_disable_quirk = true;
+    pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
+    pr_info("RTC alarms disabled\n");
+    return 0;
+}
+
+static const struct dmi_system_id rtc_quirks[] __initconst = {
+    /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
+    {
+        .callback = set_alarm_disable_quirk,
+        .ident = "IBM Truman",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
+        },
+    },
+    /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
+    {
+        .callback = set_alarm_disable_quirk,
+        .ident = "Gigabyte GA-990XA-UD3",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR,
+                    "Gigabyte Technology Co., Ltd."),
+            DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
+        },
+    },
+    /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
+    {
+        .callback = set_alarm_disable_quirk,
+        .ident = "Toshiba Satellite L300",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
+        },
+    },
+    {}
+};
+
 static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
     struct cmos_rtc *cmos = dev_get_drvdata(dev);
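The added table is the standard DMI quirk pattern: an array of matcher entries, each with a callback, walked once at init by dmi_check_system(). A self-contained sketch of the same mechanism, with matching simplified to plain string comparison:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* minimal stand-in for the kernel's dmi_system_id matching */
struct quirk_id {
    int (*callback)(const struct quirk_id *id);
    const char *ident;
    const char *vendor;   /* expected DMI_SYS_VENDOR   */
    const char *product;  /* expected DMI_PRODUCT_NAME */
};

static bool alarm_disable_quirk;

static int set_alarm_disable_quirk(const struct quirk_id *id)
{
    alarm_disable_quirk = true;
    printf("quirk active: %s\n", id->ident);
    return 0;
}

static const struct quirk_id quirks[] = {
    { set_alarm_disable_quirk, "Toshiba Satellite L300",
      "TOSHIBA", "Satellite L300" },
    { 0 }                     /* table terminator */
};

static void check_system(const char *vendor, const char *product)
{
    const struct quirk_id *q;

    for (q = quirks; q->callback; q++)
        if (!strcmp(vendor, q->vendor) && !strcmp(product, q->product))
            q->callback(q);
}

int main(void)
{
    check_system("TOSHIBA", "Satellite L300");
    return alarm_disable_quirk ? 0 : 1;
}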
@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
     if (!is_valid_irq(cmos->irq))
         return -EINVAL;
 
+    if (alarm_disable_quirk)
+        return 0;
+
     spin_lock_irqsave(&rtc_lock, flags);
 
     if (enabled)
@@ -1163,6 +1211,8 @@ static int __init cmos_init(void)
         platform_driver_registered = true;
     }
 
+    dmi_check_system(rtc_quirks);
+
     if (retval == 0)
         return 0;
@@ -180,8 +180,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
                      transfer_list);
     }
 
-    len -= prepend_len;
-
     init_completion(&bs->done);
 
     /* Fill in the Message control register */
@@ -584,7 +584,9 @@ static void spi_pump_messages(struct kthread_work *work)
     ret = master->transfer_one_message(master, master->cur_msg);
     if (ret) {
         dev_err(&master->dev,
-            "failed to transfer one message from queue\n");
+            "failed to transfer one message from queue: %d\n", ret);
+        master->cur_msg->status = ret;
+        spi_finalize_current_message(master);
         return;
     }
 }
@@ -204,7 +204,7 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, size_t, len)
+COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
 {
 #ifdef __BIG_ENDIAN
     return sys_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
@@ -103,7 +103,7 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
 
     layout->max_io_length =
         (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
-                    layout->group_width;
+                    (layout->group_width - layout->parity);
     if (layout->parity) {
         unsigned stripe_length =
                 (layout->group_width - layout->parity) *
@@ -286,7 +286,8 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
     if (length) {
         ore_calc_stripe_info(layout, offset, length, &ios->si);
         ios->length = ios->si.length;
-        ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
+        ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
+                 ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
         if (layout->parity)
             _ore_post_alloc_raid_stuff(ios);
     }
@@ -536,6 +537,7 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
     u64 H = LmodS - G * T;
 
     u32 N = div_u64(H, U);
+    u32 Nlast;
 
     /* "H - (N * U)" is just "H % U" so it's bound to u32 */
     u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
@@ -568,6 +570,10 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
     si->length = T - H;
     if (si->length > length)
         si->length = length;
+
+    Nlast = div_u64(H + si->length + U - 1, U);
+    si->maxdevUnits = Nlast - N;
+
     si->M = M;
 }
 EXPORT_SYMBOL(ore_calc_stripe_info);
@@ -583,13 +589,16 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
     int ret;
 
     if (per_dev->bio == NULL) {
-        unsigned pages_in_stripe = ios->layout->group_width *
-                    (ios->layout->stripe_unit / PAGE_SIZE);
-        unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
-                    (ios->layout->group_width -
-                     ios->layout->parity);
-        unsigned bio_size = (nr_pages + pages_in_stripe) /
-                    ios->layout->group_width;
+        unsigned bio_size;
+
+        if (!ios->reading) {
+            bio_size = ios->si.maxdevUnits;
+        } else {
+            bio_size = (ios->si.maxdevUnits + 1) *
+                (ios->layout->group_width - ios->layout->parity) /
+                ios->layout->group_width;
+        }
+        bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
 
         per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
         if (unlikely(!per_dev->bio)) {
@@ -609,8 +618,12 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
         added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
                         pglen, pgbase);
         if (unlikely(pglen != added_len)) {
-            ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
-                   per_dev->bio->bi_vcnt);
+            /* If bi_vcnt == bi_max then this is a SW BUG */
+            ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
+                   "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
+                   per_dev->bio->bi_vcnt,
+                   per_dev->bio->bi_max_vecs,
+                   BIO_MAX_PAGES_KMALLOC, cur_len);
             ret = -ENOMEM;
             goto out;
         }
@@ -1098,7 +1111,7 @@ int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
         size_attr->attr = g_attr_logical_length;
         size_attr->attr.val_ptr = &size_attr->newsize;
 
-        ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
+        ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
             _LLU(oc->comps->obj.id), _LLU(obj_size), i);
         ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
                     &size_attr->attr);
@@ -1296,22 +1296,6 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
     return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
 }
 
-static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
-                   struct pipe_buffer *buf)
-{
-    return 1;
-}
-
-static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
-    .can_merge = 0,
-    .map = generic_pipe_buf_map,
-    .unmap = generic_pipe_buf_unmap,
-    .confirm = generic_pipe_buf_confirm,
-    .release = generic_pipe_buf_release,
-    .steal = fuse_dev_pipe_buf_steal,
-    .get = generic_pipe_buf_get,
-};
-
 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                     struct pipe_inode_info *pipe,
                     size_t len, unsigned int flags)
@@ -1358,7 +1342,11 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
         buf->page = bufs[page_nr].page;
         buf->offset = bufs[page_nr].offset;
         buf->len = bufs[page_nr].len;
-        buf->ops = &fuse_dev_pipe_buf_ops;
+        /*
+         * Need to be careful about this. Having buf->ops in module
+         * code can Oops if the buffer persists after module unload.
+         */
+        buf->ops = &nosteal_pipe_buf_ops;
 
         pipe->nrbufs++;
         page_nr++;
@@ -240,13 +240,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
     error = nfs4_discover_server_trunking(clp, &old);
     if (error < 0)
         goto error;
-    nfs_put_client(clp);
-    if (clp != old) {
-        clp->cl_preserve_clid = true;
-        clp = old;
-    }
 
-    return clp;
+    if (clp != old)
+        clp->cl_preserve_clid = true;
+    nfs_put_client(clp);
+    return old;
 
 error:
     nfs_mark_client_ready(clp, error);
@@ -324,9 +322,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
             prev = pos;
 
             status = nfs_wait_client_init_complete(pos);
-            spin_lock(&nn->nfs_client_lock);
             if (status < 0)
-                continue;
+                goto out;
+            status = -NFS4ERR_STALE_CLIENTID;
+            spin_lock(&nn->nfs_client_lock);
         }
         if (pos->cl_cons_state != NFS_CS_READY)
             continue;
@@ -464,7 +463,8 @@ int nfs41_walk_client_list(struct nfs_client *new,
             }
             spin_lock(&nn->nfs_client_lock);
             if (status < 0)
-                continue;
+                break;
+            status = -NFS4ERR_STALE_CLIENTID;
         }
         if (pos->cl_cons_state != NFS_CS_READY)
             continue;
@@ -6232,9 +6232,9 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
     struct nfs_server *server = NFS_SERVER(inode);
     struct pnfs_layout_hdr *lo;
     struct nfs4_state *state = NULL;
-    unsigned long timeo, giveup;
+    unsigned long timeo, now, giveup;
 
-    dprintk("--> %s\n", __func__);
+    dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
 
     if (!nfs41_sequence_done(task, &lgp->res.seq_res))
         goto out;
@@ -6242,12 +6242,38 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
     switch (task->tk_status) {
     case 0:
         goto out;
+    /*
+     * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
+     * (or clients) writing to the same RAID stripe
+     */
     case -NFS4ERR_LAYOUTTRYLATER:
+    /*
+     * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall
+     * existing layout before getting a new one).
+     */
     case -NFS4ERR_RECALLCONFLICT:
         timeo = rpc_get_timeout(task->tk_client);
         giveup = lgp->args.timestamp + timeo;
-        if (time_after(giveup, jiffies))
-            task->tk_status = -NFS4ERR_DELAY;
+        now = jiffies;
+        if (time_after(giveup, now)) {
+            unsigned long delay;
+
+            /* Delay for:
+             * - Not less then NFS4_POLL_RETRY_MIN.
+             * - One last time a jiffie before we give up
+             * - exponential backoff (time_now minus start_attempt)
+             */
+            delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
+                      min((giveup - now - 1),
+                      now - lgp->args.timestamp));
+
+            dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
+                __func__, delay);
+            rpc_delay(task, delay);
+            task->tk_status = 0;
+            rpc_restart_call_prepare(task);
+            goto out; /* Do not call nfs4_async_handle_error() */
+        }
         break;
     case -NFS4ERR_EXPIRED:
     case -NFS4ERR_BAD_STATEID:
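The retry arithmetic above clamps the wait between a floor and one jiffy short of the give-up deadline, while growing with the time already elapsed. A worked, runnable version of just that calculation (the floor value here is hypothetical):

#include <stdio.h>

#define POLL_RETRY_MIN 100UL    /* hypothetical floor, in jiffies */

static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

/* delay grows with elapsed time but never runs past the deadline */
static unsigned long backoff(unsigned long start, unsigned long now,
                             unsigned long giveup)
{
    return max_ul(POLL_RETRY_MIN,
                  min_ul(giveup - now - 1, now - start));
}

int main(void)
{
    unsigned long start = 1000, giveup = 61000;

    for (unsigned long now = start; now < giveup; ) {
        unsigned long d = backoff(start, now, giveup);
        printf("now=%lu -> wait %lu\n", now, d);
        now += d;               /* simulate the retry firing */
    }
    return 0;
}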
@@ -6683,7 +6709,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
     switch (err) {
     case 0:
     case -NFS4ERR_WRONGSEC:
-    case -NFS4ERR_NOTSUPP:
+    case -ENOTSUPP:
         goto out;
     default:
         err = nfs4_handle_exception(server, err, &exception);
@@ -6715,7 +6741,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
     * Fall back on "guess and check" method if
     * the server doesn't support SECINFO_NO_NAME
     */
-    if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
+    if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
         err = nfs4_find_root_sec(server, fhandle, info);
         goto out_freepage;
     }
@@ -3002,7 +3002,8 @@ out_overflow:
     return -EIO;
 }
 
-static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
+        int *nfs_retval)
 {
     __be32 *p;
     uint32_t opnum;
@@ -3012,19 +3013,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
     if (unlikely(!p))
         goto out_overflow;
     opnum = be32_to_cpup(p++);
-    if (opnum != expected) {
-        dprintk("nfs: Server returned operation"
-            " %d but we issued a request for %d\n",
-                opnum, expected);
-        return -EIO;
-    }
+    if (unlikely(opnum != expected))
+        goto out_bad_operation;
     nfserr = be32_to_cpup(p);
-    if (nfserr != NFS_OK)
-        return nfs4_stat_to_errno(nfserr);
-    return 0;
+    if (nfserr == NFS_OK)
+        *nfs_retval = 0;
+    else
+        *nfs_retval = nfs4_stat_to_errno(nfserr);
+    return true;
+out_bad_operation:
+    dprintk("nfs: Server returned operation"
+        " %d but we issued a request for %d\n",
+            opnum, expected);
+    *nfs_retval = -EREMOTEIO;
+    return false;
 out_overflow:
     print_overflow_msg(__func__, xdr);
-    return -EIO;
+    *nfs_retval = -EIO;
+    return false;
+}
+
+static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+{
+    int retval;
+
+    __decode_op_hdr(xdr, expected, &retval);
+    return retval;
 }
 
 /* Dummy routine */
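__decode_op_hdr() separates "the reply was parseable" (the bool return) from "the operation failed" (the out-parameter), so a caller such as decode_open() can still bump the open seqid on ordinary server errors while bailing out entirely on malformed replies. A compact sketch of the same two-channel idiom, with a toy stream in place of XDR:

#include <stdbool.h>
#include <stdio.h>

/* toy stream: first value is the opcode, second the op status */
struct stream { int op, status; };

static bool decode_hdr(struct stream *s, int expected, int *retval)
{
    if (s->op != expected) {
        *retval = -71;          /* EREMOTEIO-style hard failure */
        return false;           /* nothing sane was parsed      */
    }
    *retval = s->status;        /* parse OK; op may still have failed */
    return true;
}

static int decode_open(struct stream *s)
{
    int status;

    if (!decode_hdr(s, /* OP_OPEN */ 18, &status))
        return status;          /* malformed: do not touch seqid */
    printf("bump seqid for status %d\n", status); /* runs even on op error */
    return status;
}

int main(void)
{
    struct stream good = { 18, 0 }, bad = { 7, 0 };
    printf("%d %d\n", decode_open(&good), decode_open(&bad));
    return 0;
}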
@@ -4842,11 +4856,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
     uint32_t savewords, bmlen, i;
     int status;
 
-    status = decode_op_hdr(xdr, OP_OPEN);
-    if (status != -EIO)
-        nfs_increment_open_seqid(status, res->seqid);
-    if (!status)
-        status = decode_stateid(xdr, &res->stateid);
+    if (!__decode_op_hdr(xdr, OP_OPEN, &status))
+        return status;
+    nfs_increment_open_seqid(status, res->seqid);
+    if (status)
+        return status;
+    status = decode_stateid(xdr, &res->stateid);
     if (unlikely(status))
         return status;
@@ -867,9 +867,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
 {
     return sys_fanotify_mark(fanotify_fd, flags,
 #ifdef __BIG_ENDIAN
-                ((__u64)mask1 << 32) | mask0,
-#else
                 ((__u64)mask0 << 32) | mask1,
+#else
+                ((__u64)mask1 << 32) | mask0,
 #endif
                  dfd, pathname);
 }
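The fanotify fix swaps which 32-bit register is treated as the high word on big- versus little-endian. The underlying operation is easy to check in user space; the split convention below is illustrative, not any particular ABI:

#include <stdint.h>
#include <stdio.h>

/* reassemble a 64-bit mask from two 32-bit halves; which half is the
 * high word depends on how the register pair was laid out */
static uint64_t join(uint32_t lo_word, uint32_t hi_word)
{
    return ((uint64_t)hi_word << 32) | lo_word;
}

int main(void)
{
    uint64_t mask = 0x1122334455667788ULL;
    uint32_t w0 = (uint32_t)(mask >> 32);   /* high half */
    uint32_t w1 = (uint32_t)mask;           /* low half  */

    /* swapping the arguments reproduces the original bug */
    printf("ok:  %llx\n", (unsigned long long)join(w1, w0));
    printf("bug: %llx\n", (unsigned long long)join(w0, w1));
    return 0;
}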
@@ -947,9 +947,9 @@ out:
     return ret;
 }
 
-COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
         const struct compat_iovec __user *,vec,
-        unsigned long, vlen)
+        compat_ulong_t, vlen)
 {
     struct fd f = fdget(fd);
     ssize_t ret;
@@ -983,9 +983,9 @@ COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
     return ret;
 }
 
-COMPAT_SYSCALL_DEFINE5(preadv, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
         const struct compat_iovec __user *,vec,
-        unsigned long, vlen, u32, pos_low, u32, pos_high)
+        compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
     loff_t pos = ((loff_t)pos_high << 32) | pos_low;
     return compat_sys_preadv64(fd, vec, vlen, pos);
@@ -1013,9 +1013,9 @@ out:
     return ret;
 }
 
-COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
         const struct compat_iovec __user *, vec,
-        unsigned long, vlen)
+        compat_ulong_t, vlen)
 {
     struct fd f = fdget(fd);
     ssize_t ret;
@@ -1049,9 +1049,9 @@ COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
     return ret;
 }
 
-COMPAT_SYSCALL_DEFINE5(pwritev, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
         const struct compat_iovec __user *,vec,
-        unsigned long, vlen, u32, pos_low, u32, pos_high)
+        compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
     loff_t pos = ((loff_t)pos_high << 32) | pos_low;
     return compat_sys_pwritev64(fd, vec, vlen, pos);
fs/splice.c (18 changes)
@@ -555,6 +555,24 @@ static const struct pipe_buf_operations default_pipe_buf_ops = {
     .get = generic_pipe_buf_get,
 };
 
+static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+                    struct pipe_buffer *buf)
+{
+    return 1;
+}
+
+/* Pipe buffer operations for a socket and similar. */
+const struct pipe_buf_operations nosteal_pipe_buf_ops = {
+    .can_merge = 0,
+    .map = generic_pipe_buf_map,
+    .unmap = generic_pipe_buf_unmap,
+    .confirm = generic_pipe_buf_confirm,
+    .release = generic_pipe_buf_release,
+    .steal = generic_pipe_buf_nosteal,
+    .get = generic_pipe_buf_get,
+};
+EXPORT_SYMBOL(nosteal_pipe_buf_ops);
+
 static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
                 unsigned long vlen, loff_t offset)
 {
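nosteal_pipe_buf_ops lives in core pipe code precisely so that fuse (earlier in this series) and skbuff (later) can drop their private copies; per the comment in the fuse hunk, an ops table inside module text can still be dereferenced after the module is unloaded. A sketch of the shared-vtable idea, with hypothetical names:

#include <stdio.h>

/* one shared ops table in "core" code; users reference it instead of
 * defining their own copy that could vanish with their module */
struct buf_ops {
    int (*steal)(void *buf);
};

static int nosteal(void *buf)
{
    (void)buf;
    return 1;                    /* 1 == refuse to give the page away */
}

const struct buf_ops nosteal_buf_ops = { .steal = nosteal };

int main(void)
{
    /* two independent "drivers" share the same long-lived table */
    const struct buf_ops *fuse_user = &nosteal_buf_ops;
    const struct buf_ops *skb_user = &nosteal_buf_ops;

    printf("%d %d\n", fuse_user->steal(0), skb_user->steal(0));
    return 0;
}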
@@ -135,7 +135,7 @@ static inline void audit_syscall_exit(void *pt_regs)
 {
     if (unlikely(current->audit_context)) {
         int success = is_syscall_success(pt_regs);
-        int return_code = regs_return_value(pt_regs);
+        long return_code = regs_return_value(pt_regs);
 
         __audit_syscall_exit(success, return_code);
     }
@@ -326,16 +326,16 @@ asmlinkage long compat_sys_keyctl(u32 option,
                   u32 arg2, u32 arg3, u32 arg4, u32 arg5);
 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
 
-asmlinkage ssize_t compat_sys_readv(unsigned long fd,
-        const struct compat_iovec __user *vec, unsigned long vlen);
-asmlinkage ssize_t compat_sys_writev(unsigned long fd,
-        const struct compat_iovec __user *vec, unsigned long vlen);
-asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
+asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
+        const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
+        const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
         const struct compat_iovec __user *vec,
-        unsigned long vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
+        compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
         const struct compat_iovec __user *vec,
-        unsigned long vlen, u32 pos_low, u32 pos_high);
+        compat_ulong_t vlen, u32 pos_low, u32 pos_high);
 asmlinkage long comat_sys_lseek(unsigned int, compat_off_t, unsigned int);
 
 asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
@@ -421,7 +421,7 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                   compat_long_t addr, compat_long_t data);
 
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
+asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
 /*
  * epoll (fs/eventpoll.c) compat bits follow ...
  */
@@ -157,6 +157,8 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 
+extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+
 /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
 long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
 struct pipe_inode_info *get_pipe_info(struct file *file);
@@ -142,9 +142,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
     return x;
 }
 
-extern unsigned long global_reclaimable_pages(void);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
-
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
@@ -102,6 +102,7 @@ struct ore_striping_info {
     unsigned unit_off;
     unsigned cur_pg;
     unsigned cur_comp;
+    unsigned maxdevUnits;
 };
 
 struct ore_io_state;
@@ -103,7 +103,8 @@ static int audit_rate_limit;
 
 /* Number of outstanding audit_buffers allowed. */
 static int audit_backlog_limit = 64;
-static int audit_backlog_wait_time = 60 * HZ;
+#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
+static int audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
 static int audit_backlog_wait_overflow = 0;
 
 /* The identity of the user shutting down the audit system. */
@@ -1135,6 +1136,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
         return NULL;
     }
 
+    audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
+
     ab = audit_buffer_alloc(ctx, gfp_mask, type);
     if (!ab) {
         audit_log_lost("out of memory in audit_log_start");
@@ -72,7 +72,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
     tk->wall_to_monotonic = wtm;
     set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
     tk->offs_real = timespec_to_ktime(tmp);
-    tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
+    tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 }
 
 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
@@ -590,7 +590,7 @@ s32 timekeeping_get_tai_offset(void)
 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
 {
     tk->tai_offset = tai_offset;
-    tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
+    tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
 }
 
 /**
@@ -605,6 +605,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
     raw_spin_lock_irqsave(&timekeeper_lock, flags);
     write_seqcount_begin(&timekeeper_seq);
     __timekeeping_set_tai_offset(tk, tai_offset);
+    timekeeping_update(tk, false, true);
     write_seqcount_end(&timekeeper_seq);
     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
     clock_was_set();
@@ -1007,6 +1008,8 @@ static int timekeeping_suspend(void)
         timekeeping_suspend_time =
             timespec_add(timekeeping_suspend_time, delta_delta);
     }
+
+    timekeeping_update(tk, false, true);
     write_seqcount_end(&timekeeper_seq);
     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
@@ -1236,9 +1239,10 @@ out_adjust:
  * It also calls into the NTP code to handle leapsecond processing.
  *
  */
-static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
+static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
     u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+    unsigned int clock_set = 0;
 
     while (tk->xtime_nsec >= nsecps) {
         int leap;
@@ -1260,9 +1264,10 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
 
             __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
 
-            clock_was_set_delayed();
+            clock_set = 1;
         }
     }
+    return clock_set;
 }
 
 /**
@@ -1275,7 +1280,8 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
  * Returns the unconsumed cycles.
  */
 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
-                        u32 shift)
+                        u32 shift,
+                        unsigned int *clock_set)
 {
     cycle_t interval = tk->cycle_interval << shift;
     u64 raw_nsecs;
@@ -1289,7 +1295,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
     tk->cycle_last += interval;
 
     tk->xtime_nsec += tk->xtime_interval << shift;
-    accumulate_nsecs_to_secs(tk);
+    *clock_set |= accumulate_nsecs_to_secs(tk);
 
     /* Accumulate raw time */
     raw_nsecs = (u64)tk->raw_interval << shift;
@@ -1347,6 +1353,7 @@ static void update_wall_time(void)
     struct timekeeper *tk = &shadow_timekeeper;
     cycle_t offset;
     int shift = 0, maxshift;
+    unsigned int clock_set = 0;
     unsigned long flags;
 
     raw_spin_lock_irqsave(&timekeeper_lock, flags);
@@ -1381,7 +1388,8 @@ static void update_wall_time(void)
     maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
     shift = min(shift, maxshift);
     while (offset >= tk->cycle_interval) {
-        offset = logarithmic_accumulation(tk, offset, shift);
+        offset = logarithmic_accumulation(tk, offset, shift,
+                            &clock_set);
         if (offset < tk->cycle_interval<<shift)
             shift--;
     }
@@ -1399,7 +1407,7 @@ static void update_wall_time(void)
     * Finally, make sure that after the rounding
     * xtime_nsec isn't larger than NSEC_PER_SEC
     */
-    accumulate_nsecs_to_secs(tk);
+    clock_set |= accumulate_nsecs_to_secs(tk);
 
     write_seqcount_begin(&timekeeper_seq);
     /* Update clock->cycle_last with the new value */
@@ -1419,6 +1427,10 @@ static void update_wall_time(void)
     write_seqcount_end(&timekeeper_seq);
 out:
     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+    if (clock_set)
+        /* have to call outside the timekeeper_seq */
+        clock_was_set_delayed();
 
 }
 
 /**
@@ -1677,11 +1689,14 @@ int do_adjtimex(struct timex *txc)
 
     if (tai != orig_tai) {
         __timekeeping_set_tai_offset(tk, tai);
-        clock_was_set_delayed();
+        timekeeping_update(tk, false, true);
     }
     write_seqcount_end(&timekeeper_seq);
     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+    if (tai != orig_tai)
+        clock_was_set();
+
     ntp_notify_cmos_timer();
 
     return ret;
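The two one-character fixes at the top of this file encode TAI = UTC + tai_offset: the monotonic-to-TAI offset must be offs_real plus the offset, where the old code subtracted. In whole seconds, with an illustrative offset value:

#include <stdio.h>

/* toy 1-second-resolution version of the fixed calculation */
int main(void)
{
    long long offs_real = 1700000000;  /* monotonic -> realtime (UTC) */
    long long tai_offset = 35;         /* TAI - UTC, whole seconds    */

    long long offs_tai = offs_real + tai_offset;  /* fixed: add      */
    long long wrong    = offs_real - tai_offset;  /* old bug: sub    */

    printf("TAI runs ahead of UTC by %lld s\n", offs_tai - offs_real);
    printf("the buggy value lagged by %lld s\n", offs_real - wrong);
    return 0;
}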
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,29 @@ static void update_global_ops(void)
     global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+    /*
+     * This function is just a stub to implement a hard force
+     * of synchronize_sched(). This requires synchronizing
+     * tasks even in userspace and idle.
+     *
+     * Yes, function tracing is rude.
+     */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+    /* Probably not needed, but do it anyway */
+    smp_rmb();
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
 static void update_ftrace_function(void)
 {
     ftrace_func_t func;
@@ -296,16 +321,61 @@ static void update_ftrace_function(void)
              !FTRACE_FORCE_LIST_FUNC)) {
         /* Set the ftrace_ops that the arch callback uses */
         if (ftrace_ops_list == &global_ops)
-            function_trace_op = ftrace_global_list;
+            set_function_trace_op = ftrace_global_list;
         else
-            function_trace_op = ftrace_ops_list;
+            set_function_trace_op = ftrace_ops_list;
         func = ftrace_ops_list->func;
     } else {
         /* Just use the default ftrace_ops */
-        function_trace_op = &ftrace_list_end;
+        set_function_trace_op = &ftrace_list_end;
         func = ftrace_ops_list_func;
     }
 
+    /* If there's no change, then do nothing more here */
+    if (ftrace_trace_function == func)
+        return;
+
+    update_function_graph_func();
+
+    /*
+     * If we are using the list function, it doesn't care
+     * about the function_trace_ops.
+     */
+    if (func == ftrace_ops_list_func) {
+        ftrace_trace_function = func;
+        /*
+         * Don't even bother setting function_trace_ops,
+         * it would be racy to do so anyway.
+         */
+        return;
+    }
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+    /*
+     * For static tracing, we need to be a bit more careful.
+     * The function change takes affect immediately. Thus,
+     * we need to coorditate the setting of the function_trace_ops
+     * with the setting of the ftrace_trace_function.
+     *
+     * Set the function to the list ops, which will call the
+     * function we want, albeit indirectly, but it handles the
+     * ftrace_ops and doesn't depend on function_trace_op.
+     */
+    ftrace_trace_function = ftrace_ops_list_func;
+    /*
+     * Make sure all CPUs see this. Yes this is slow, but static
+     * tracing is slow and nasty to have enabled.
+     */
+    schedule_on_each_cpu(ftrace_sync);
+    /* Now all cpus are using the list ops. */
+    function_trace_op = set_function_trace_op;
+    /* Make sure the function_trace_op is visible on all CPUs */
+    smp_wmb();
+    /* Nasty way to force a rmb on all cpus */
+    smp_call_function(ftrace_sync_ipi, NULL, 1);
+    /* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
     ftrace_trace_function = func;
 }
 
@@ -428,16 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
     } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
         ret = remove_ftrace_list_ops(&ftrace_control_list,
                          &control_ops, ops);
-        if (!ret) {
-            /*
-             * The ftrace_ops is now removed from the list,
-             * so there'll be no new users. We must ensure
-             * all current users are done before we free
-             * the control data.
-             */
-            synchronize_sched();
-            control_ops_free(ops);
-        }
     } else
         ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -447,13 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
     if (ftrace_enabled)
         update_ftrace_function();
 
-    /*
-     * Dynamic ops may be freed, we must make sure that all
-     * callers are done before leaving this function.
-     */
-    if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-        synchronize_sched();
-
     return 0;
 }
 
@@ -1952,8 +2005,14 @@ void ftrace_modify_all_code(int command)
     else if (command & FTRACE_DISABLE_CALLS)
         ftrace_replace_code(0);
 
-    if (command & FTRACE_UPDATE_TRACE_FUNC)
+    if (command & FTRACE_UPDATE_TRACE_FUNC) {
+        function_trace_op = set_function_trace_op;
+        smp_wmb();
+        /* If irqs are disabled, we are in stop machine */
+        if (!irqs_disabled())
+            smp_call_function(ftrace_sync_ipi, NULL, 1);
         ftrace_update_ftrace_func(ftrace_trace_function);
+    }
 
     if (command & FTRACE_START_FUNC_RET)
         ftrace_enable_ftrace_graph_caller();
@@ -2116,10 +2175,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
         command |= FTRACE_UPDATE_TRACE_FUNC;
     }
 
-    if (!command || !ftrace_enabled)
+    if (!command || !ftrace_enabled) {
+        /*
+         * If these are control ops, they still need their
+         * per_cpu field freed. Since, function tracing is
+         * not currently active, we can just free them
+         * without synchronizing all CPUs.
+         */
+        if (ops->flags & FTRACE_OPS_FL_CONTROL)
+            control_ops_free(ops);
         return 0;
+    }
 
     ftrace_run_update_code(command);
+
+    /*
+     * Dynamic ops may be freed, we must make sure that all
+     * callers are done before leaving this function.
+     * The same goes for freeing the per_cpu data of the control
+     * ops.
+     *
+     * Again, normal synchronize_sched() is not good enough.
+     * We need to do a hard force of sched synchronization.
+     * This is because we use preempt_disable() to do RCU, but
+     * the function tracers can be called where RCU is not watching
+     * (like before user_exit()). We can not rely on the RCU
+     * infrastructure to do the synchronization, thus we must do it
+     * ourselves.
+     */
+    if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+        schedule_on_each_cpu(ftrace_sync);
+
+        if (ops->flags & FTRACE_OPS_FL_CONTROL)
+            control_ops_free(ops);
+    }
+
     return 0;
 }
 
@@ -4728,6 +4818,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
             (trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -4869,6 +4960,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
                 FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+    if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+        return 0;
+    return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer requires testing the
+ * function against the global ops, and not just trace any function
+ * that any ftrace_ops registered.
+ */
+static void update_function_graph_func(void)
+{
+    if (ftrace_ops_list == &ftrace_list_end ||
+        (ftrace_ops_list == &global_ops &&
+         global_ops.next == &ftrace_list_end))
+        ftrace_graph_entry = __ftrace_graph_entry;
+    else
+        ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
             trace_func_graph_ent_t entryfunc)
 {
@@ -4893,7 +5008,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
     }
 
     ftrace_graph_return = retfunc;
-    ftrace_graph_entry = entryfunc;
+
+    /*
+     * Update the indirect function to the entryfunc, and the
+     * function that gets called to the entry_test first. Then
+     * call the update fgraph entry function to determine if
+     * the entryfunc should be called directly or not.
+     */
+    __ftrace_graph_entry = entryfunc;
+    ftrace_graph_entry = ftrace_graph_entry_test;
+    update_function_graph_func();
 
     ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
@@ -4912,6 +5036,7 @@ void unregister_ftrace_graph(void)
     ftrace_graph_active--;
     ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
     ftrace_graph_entry = ftrace_graph_entry_stub;
+    __ftrace_graph_entry = ftrace_graph_entry_stub;
    ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
     unregister_pm_notifier(&ftrace_suspend_notifier);
     unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
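update_ftrace_function() above is a publish pattern: stage the payload (set_function_trace_op), force every CPU to a synchronization point, write function_trace_op, then pair a write barrier with an IPI-driven read barrier before flipping the entry point. In portable C11 the equivalent guarantee is acquire/release ordering on the published pointer — a minimal sketch of the idea, not the kernel's actual primitives:

#include <stdatomic.h>
#include <stdio.h>

static int payload;                      /* stands in for function_trace_op   */
static _Atomic(int *) published = NULL;  /* stands in for the traced function */

static void writer(void)
{
    payload = 42;                        /* stage the data first */
    /* release: no thread may observe 'published' without seeing 'payload' */
    atomic_store_explicit(&published, &payload, memory_order_release);
}

static void reader(void)
{
    int *p = atomic_load_explicit(&published, memory_order_acquire);
    if (p)
        printf("saw %d\n", *p);          /* guaranteed 42, never garbage */
}

int main(void)
{
    writer();
    reader();
    return 0;
}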
@@ -424,6 +424,9 @@ int __trace_puts(unsigned long ip, const char *str, int size)
     unsigned long irq_flags;
     int alloc;
 
+    if (unlikely(tracing_selftest_running || tracing_disabled))
+        return 0;
+
     alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
     local_save_flags(irq_flags);
@@ -464,6 +467,9 @@ int __trace_bputs(unsigned long ip, const char *str)
     unsigned long irq_flags;
     int size = sizeof(struct bputs_entry);
 
+    if (unlikely(tracing_selftest_running || tracing_disabled))
+        return 0;
+
     local_save_flags(irq_flags);
     buffer = global_trace.trace_buffer.buffer;
     event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
@@ -5878,6 +5884,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 
     rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 
+    buf->tr = tr;
+
     buf->buffer = ring_buffer_alloc(size, rb_flags);
     if (!buf->buffer)
         return -ENOMEM;
@@ -854,14 +854,14 @@ static int page_action(struct page_state *ps, struct page *p,
  * the pages and send SIGBUS to the processes if the data was dirty.
  */
 static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
-                  int trapno, int flags)
+                  int trapno, int flags, struct page **hpagep)
 {
     enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
     struct address_space *mapping;
     LIST_HEAD(tokill);
     int ret;
     int kill = 1, forcekill;
-    struct page *hpage = compound_head(p);
+    struct page *hpage = *hpagep;
     struct page *ppage;
 
     if (PageReserved(p) || PageSlab(p))
@@ -940,11 +940,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
      * We pinned the head page for hwpoison handling,
      * now we split the thp and we are interested in
      * the hwpoisoned raw page, so move the refcount
-     * to it.
+     * to it. Similarly, page lock is shifted.
      */
     if (hpage != p) {
         put_page(hpage);
         get_page(p);
+        lock_page(p);
+        unlock_page(hpage);
+        *hpagep = p;
     }
     /* THP is split, so ppage should be the real poisoned page. */
     ppage = p;
@@ -962,17 +965,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
     if (kill)
         collect_procs(ppage, &tokill);
 
-    if (hpage != ppage)
-        lock_page(ppage);
-
     ret = try_to_unmap(ppage, ttu);
     if (ret != SWAP_SUCCESS)
         printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
                 pfn, page_mapcount(ppage));
 
-    if (hpage != ppage)
-        unlock_page(ppage);
-
     /*
      * Now that the dirty bit has been propagated to the
      * struct page and all unmaps done we can decide if
@@ -1189,8 +1186,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
     /*
     * Now take care of user space mappings.
     * Abort on fail: __delete_from_page_cache() assumes unmapped page.
+    *
+    * When the raw error page is thp tail page, hpage points to the raw
+    * page after thp split.
     */
-    if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) {
+    if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
+        != SWAP_SUCCESS) {
         printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
         res = -EBUSY;
         goto out;
@@ -170,7 +170,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
     * implementation used by LSMs.
     */
    if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-        adj -= 30;
+        points -= (points * 3) / 100;
 
    /* Normalize to oom_score_adj units */
    adj *= totalpages / 1000;
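The root bonus becomes a 3% discount on the task's badness points rather than a flat adjustment, so small consumers are no longer over-discounted on the 0..1000 scale. Worked numbers, runnable:

#include <stdio.h>

/* badness on the 0..1000 scale used by the oom killer */
static long badness(long points, int is_root)
{
    if (is_root)
        points -= (points * 3) / 100;   /* proportional 3% discount */
    return points > 0 ? points : 1;
}

int main(void)
{
    /* a root task using half its allowed memory: 500 - 15 = 485 */
    printf("%ld\n", badness(500, 1));
    /* the old flat "adj -= 30" over-discounted small consumers */
    printf("%ld\n", badness(50, 1));    /* 3%% gives 49, not 20 */
    return 0;
}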
@@ -188,6 +188,26 @@ static unsigned long writeout_period_time = 0;
  * global dirtyable memory first.
  */
 
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache. This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+    unsigned long nr_pages;
+
+    nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+    nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+    nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+    nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+    return nr_pages;
+}
+
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
@@ -195,11 +215,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
     unsigned long x = 0;
 
     for_each_node_state(node, N_HIGH_MEMORY) {
-        struct zone *z =
-            &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+        struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-        x += zone_page_state(z, NR_FREE_PAGES) +
-             zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+        x += zone_dirtyable_memory(z);
     }
     /*
      * Unreclaimable memory (kernel memory or anonymous memory
@@ -235,9 +253,12 @@ static unsigned long global_dirtyable_memory(void)
 {
     unsigned long x;
 
-    x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+    x = global_page_state(NR_FREE_PAGES);
     x -= min(x, dirty_balance_reserve);
 
+    x += global_page_state(NR_INACTIVE_FILE);
+    x += global_page_state(NR_ACTIVE_FILE);
+
     if (!vm_highmem_is_dirtyable)
         x -= highmem_dirtyable_memory(x);
 
@@ -288,32 +309,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
     trace_global_dirty_state(background, dirty);
 }
 
-/**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache. This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
-    /*
-     * The effective global number of dirtyable pages may exclude
-     * highmem as a big-picture measure to keep the ratio between
-     * dirty memory and lowmem reasonable.
-     *
-     * But this function is purely about the individual zone and a
-     * highmem zone can hold its share of dirty pages, so we don't
-     * care about vm_highmem_is_dirtyable here.
-     */
-    unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
-        zone_reclaimable_pages(zone);
-
-    /* don't allow this to underflow */
-    nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
-    return nr_pages;
-}
-
 /**
  * zone_dirty_limit - maximum number of dirty pages allowed in a zone
  * @zone: the zone
@@ -4285,7 +4285,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
             page = ACCESS_ONCE(c->partial);
             if (page) {
-                x = page->pobjects;
+                node = page_to_nid(page);
+                if (flags & SO_TOTAL)
+                    WARN_ON_ONCE(1);
+                else if (flags & SO_OBJECTS)
+                    WARN_ON_ONCE(1);
+                else
+                    x = page->pages;
                 total += x;
                 nodes[node] += x;
             }
mm/vmscan.c (49 changes)
@@ -2117,6 +2117,20 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
     return aborted_reclaim;
 }
 
+static unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+    int nr;
+
+    nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+         zone_page_state(zone, NR_INACTIVE_FILE);
+
+    if (get_nr_swap_pages() > 0)
+        nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+              zone_page_state(zone, NR_INACTIVE_ANON);
+
+    return nr;
+}
+
 static bool zone_reclaimable(struct zone *zone)
 {
     return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
@@ -3075,41 +3089,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
     wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
-    int nr;
-
-    nr = global_page_state(NR_ACTIVE_FILE) +
-         global_page_state(NR_INACTIVE_FILE);
-
-    if (get_nr_swap_pages() > 0)
-        nr += global_page_state(NR_ACTIVE_ANON) +
-              global_page_state(NR_INACTIVE_ANON);
-
-    return nr;
-}
-
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
-    int nr;
-
-    nr = zone_page_state(zone, NR_ACTIVE_FILE) +
-         zone_page_state(zone, NR_INACTIVE_FILE);
-
-    if (get_nr_swap_pages() > 0)
-        nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-              zone_page_state(zone, NR_INACTIVE_ANON);
-
-    return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
@@ -74,36 +74,6 @@
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
-static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
-                  struct pipe_buffer *buf)
-{
-    put_page(buf->page);
-}
-
-static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
-                struct pipe_buffer *buf)
-{
-    get_page(buf->page);
-}
-
-static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
-                   struct pipe_buffer *buf)
-{
-    return 1;
-}
-
-
-/* Pipe buffer operations for a socket. */
-static const struct pipe_buf_operations sock_pipe_buf_ops = {
-    .can_merge = 0,
-    .map = generic_pipe_buf_map,
-    .unmap = generic_pipe_buf_unmap,
-    .confirm = generic_pipe_buf_confirm,
-    .release = sock_pipe_buf_release,
-    .steal = sock_pipe_buf_steal,
-    .get = sock_pipe_buf_get,
-};
-
 /**
  *  skb_panic - private function for out-of-line support
  *  @skb:   buffer
@@ -1811,7 +1781,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
         .partial = partial,
         .nr_pages_max = MAX_SKB_FRAGS,
         .flags = flags,
-        .ops = &sock_pipe_buf_ops,
+        .ops = &nosteal_pipe_buf_ops,
         .spd_release = sock_spd_release,
     };
     struct sk_buff *frag_iter;
@@ -1402,9 +1402,13 @@ call_refreshresult(struct rpc_task *task)
     task->tk_action = call_refresh;
     switch (status) {
     case 0:
-        if (rpcauth_uptodatecred(task))
+        if (rpcauth_uptodatecred(task)) {
             task->tk_action = call_allocate;
-        return;
+            return;
+        }
+        /* Use rate-limiting and a max number of retries if refresh
+         * had status 0 but failed to update the cred.
+         */
     case -ETIMEDOUT:
         rpc_delay(task, 3*HZ);
     case -EAGAIN:
@@ -1941,7 +1941,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
         if (rc)
             goto out;
 
-        hashtab_insert(p->filename_trans, ft, otype);
+        rc = hashtab_insert(p->filename_trans, ft, otype);
+        if (rc) {
+            /*
+             * Do not return -EEXIST to the caller, or the system
+             * will not boot.
+             */
+            if (rc != -EEXIST)
+                goto out;
+            /* But free memory to avoid memory leak. */
+            kfree(ft);
+            kfree(name);
+            kfree(otype);
+        }
     }
     hash_eval(p->filename_trans, "filenametr");
     return 0;
@@ -5,7 +5,7 @@ DESTDIR :=
 
 turbostat : turbostat.c
 CFLAGS += -Wall
-CFLAGS += -I../../../../arch/x86/include/uapi/
+CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
 
 %: %.c
 	@mkdir -p $(BUILD_OUTPUT)
@@ -20,7 +20,7 @@
  */
 
 #define _GNU_SOURCE
-#include <asm/msr.h>
+#include MSRHEADER
 #include <stdio.h>
 #include <unistd.h>
 #include <sys/types.h>
@@ -35,6 +35,7 @@
 #include <string.h>
 #include <ctype.h>
 #include <sched.h>
+#include <cpuid.h>
 
 char *proc_stat = "/proc/stat";
 unsigned int interval_sec = 5; /* set with -i interval_sec */
@@ -1894,7 +1895,7 @@ void check_cpuid()
 
     eax = ebx = ecx = edx = 0;
 
-    asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
+    __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
 
     if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
         genuine_intel = 1;
@@ -1903,7 +1904,7 @@ void check_cpuid()
         fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
             (char *)&ebx, (char *)&edx, (char *)&ecx);
 
-    asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+    __get_cpuid(1, &fms, &ebx, &ecx, &edx);
     family = (fms >> 8) & 0xf;
     model = (fms >> 4) & 0xf;
     stepping = fms & 0xf;
@@ -1925,7 +1926,7 @@ void check_cpuid()
     * This check is valid for both Intel and AMD.
     */
    ebx = ecx = edx = 0;
-    asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
+    __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
 
    if (max_level < 0x80000007) {
        fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
@@ -1936,7 +1937,7 @@ void check_cpuid()
     * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
     * this check is valid for both Intel and AMD
     */
-    asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
+    __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
    has_invariant_tsc = edx & (1 << 8);
 
    if (!has_invariant_tsc) {
@@ -1949,7 +1950,7 @@ void check_cpuid()
     * this check is valid for both Intel and AMD
     */
 
-    asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
+    __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
    has_aperf = ecx & (1 << 0);
    do_dts = eax & (1 << 0);
    do_ptm = eax & (1 << 6);
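The turbostat hunks replace hand-rolled cpuid inline assembly with GCC's <cpuid.h> helper, which also takes care of registers that must be preserved (e.g. ebx under -fPIC) and returns nonzero only when the leaf is supported. Minimal standalone usage, mirroring the tool's leaf-0 vendor check:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* leaf 0: max supported leaf in eax, vendor string in ebx/edx/ecx */
    if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
        return 1;

    printf("max leaf %u, vendor %.4s%.4s%.4s\n", eax,
           (char *)&ebx, (char *)&edx, (char *)&ecx);
    return 0;
}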