Merge branch 'master' into for-2.6.34

Conflicts:
	include/linux/blkdev.h

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

commit 7f03292ee1
47 changed files with 401 additions and 236 deletions
@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
 	Default: 5

 max_addresses - INTEGER
-	Number of maximum addresses per interface. 0 disables limitation.
-	It is recommended not set too large value (or 0) because it would
-	be too easy way to crash kernel to allow to create too much of
-	autoconfigured addresses.
+	Maximum number of autoconfigured addresses per interface. Setting
+	to zero disables the limitation. It is not recommended to set this
+	value too large (or to zero) because it would be an easy way to
+	crash the kernel by allowing too many addresses to be created.
 	Default: 16

 disable_ipv6 - BOOLEAN
@@ -3489,9 +3489,9 @@ S:	Maintained
 F:	drivers/net/wireless/libertas/

 MARVELL MV643XX ETHERNET DRIVER
-M:	Lennert Buytenhek <buytenh@marvell.com>
+M:	Lennert Buytenhek <buytenh@wantstofly.org>
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Maintained
 F:	drivers/net/mv643xx_eth.*
 F:	include/linux/mv643xx.h
Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity

 # *DOCUMENTATION*
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);

-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);

 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
  * Little endian
  */

-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))

 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
@@ -172,16 +172,15 @@ do { \
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op)	\
 do {								\
-	int step = -line_length;				\
-	int count = end - start;				\
-	BUG_ON(count <= 0);					\
+	int volatile temp;					\
+	BUG_ON(end - start <= 0);				\
 								\
-	__asm__ __volatile__ (" 1:	addk	%0, %0, %1;	\
-					" #op "	%0, r0;		\
-					bgtid	%1, 1b;		\
-					addk	%1, %1, %2;	\
-					" : : "r" (start), "r" (count), \
-					"r" (step) : "memory");	\
+	__asm__ __volatile__ (" 1:	" #op "	%1, r0;		\
+					cmpu	%0, %1, %2;	\
+					bgtid	%0, 1b;		\
+					addk	%1, %1, %3;	\
+					" : : "r" (temp), "r" (start), "r" (end),\
+					"r" (line_length) : "memory");	\
 } while (0);

 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)

@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
 	pr_debug("%s\n", __func__);
 	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 					wdc.clear)
-
-#if 0
-	unsigned int i;
-
-	pr_debug("%s\n", __func__);
-
-	/* Just loop through cache size and invalidate it */
-	for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
-			__invalidate_dcache(0, i);
-#endif
 }

 static void __invalidate_dcache_range_wb(unsigned long start,
@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
 			break;
 	}

+	/* Ignoring the last page when ddr size is 128M. Cached
+	 * accesses to last page is causing the processor to prefetch
+	 * using address above 128M stepping out of the ddr address
+	 * space.
+	 */
+	if (mem == 0x8000000)
+		mem -= 0x1000;
+
 	add_memory_region(0, mem, BOOT_MEM_RAM);
 }
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
@@ -18,7 +18,6 @@ config PARISC
	select BUG
	select HAVE_PERF_EVENTS
	select GENERIC_ATOMIC64 if !64BIT
-	select HAVE_ARCH_TRACEHOOK
	help
	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
	  in many of their workstations & servers (HP9000 700 and 800 series,
@@ -18,7 +18,6 @@

 #include <asm/io.h>
 #include <asm/system.h>
-#include <asm/cache.h>		/* for L1_CACHE_BYTES */
 #include <asm/superio.h>

 #define DEBUG_RESOURCES 0

@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
	} else {
		printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
	}

+	/* Set the CLS for PCI as early as possible. */
+	pci_cache_line_size = pci_dfl_cache_line_size;
+
	return 0;
 }

@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
	** upper byte is PCI_LATENCY_TIMER.
	*/
	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
-			      (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+			      (0x80 << 8) | pci_cache_line_size);
 }
@@ -53,8 +53,8 @@ struct stat {
	ino_t		st_ino;
	mode_t		st_mode;
	short		st_nlink;
-	uid_t		st_uid;
-	gid_t		st_gid;
+	uid16_t		st_uid;
+	gid16_t		st_gid;
	unsigned short	st_rdev;
	off_t		st_size;
	time_t		st_atime;
@@ -450,6 +450,8 @@ struct thread_struct {
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
+	/* Keep track of the exact dr7 value set by the user */
+	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }

-/*
- * Store a breakpoint's encoded address, length, and type.
- */
-static int arch_store_info(struct perf_event *bp)
-{
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-				kallsyms_lookup_name(info->name);
-	if (info->address)
-		return 0;
-
-	return -EINVAL;
-}
-
 int arch_bp_generic_fields(int x86_len, int x86_type,
			   int *gen_len, int *gen_type)
 {
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
		return ret;
	}

-	ret = arch_store_info(bp);
-
-	if (ret < 0)
-		return ret;
+	/*
+	 * For kernel-addresses, either the address or symbol name can be
+	 * specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)
+				kallsyms_lookup_name(info->name);
	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
-		val = ptrace_get_dr7(thread->ptrace_bps);
+		val = thread->ptrace_dr7;
	}
	return val;
 }
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
			return rc;
	}
	/* All that's left is DR7 */
-	if (n == 7)
+	if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
+		if (!rc)
+			thread->ptrace_dr7 = val;
+	}

 ret_path:
	return rc;
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
 }

 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1856,15 +1856,8 @@ void blk_dequeue_request(struct request *rq)
	 * and to it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
-	if (blk_account_rq(rq)) {
+	if (blk_account_rq(rq))
		q->in_flight[rq_is_sync(rq)]++;
-		/*
-		 * Mark this device as supporting hardware queuing, if
-		 * we have more IOs in flight than 4.
-		 */
-		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
-			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
-	}
 }

 /**
@@ -880,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
+	if (cx->entry_method != ACPI_CSTATE_FFH) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+	}

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;

@@ -965,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
	}

	local_irq_disable();
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
+	if (cx->entry_method != ACPI_CSTATE_FFH) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+	}

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
	if (result)
		goto update_bios;

-	return 0;
+	/* We need to call _PPC once when cpufreq starts */
+	if (ignore_ppc != 1)
+		result = acpi_processor_get_platform_limit(pr);
+
+	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
@@ -21,7 +21,7 @@

 #define DRV_NAME "cs5535-clockevt"

-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
 module_param_named(irq, timer_irq, int, 0644);
 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
@@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);

 static int i915_drm_freeze(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */

@@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev)

	i915_save_state(dev);

-	return 0;
-}
-
-static void i915_drm_suspend(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
	intel_opregion_free(dev, 1);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

+	return 0;
 }

 static int i915_suspend(struct drm_device *dev, pm_message_t state)

@@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
	if (error)
		return error;

-	i915_drm_suspend(dev);
-
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);

@@ -237,6 +232,10 @@ static int i915_drm_thaw(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);

@@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev)

	pci_set_master(dev->pdev);

-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
-
	return i915_drm_thaw(dev);
 }

@@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev)
	if (error)
		return error;

-	i915_drm_suspend(drm_dev);
-
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

@@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev)
 {
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	int error;
-
-	error = i915_drm_freeze(drm_dev);
-	if (!error)
-		i915_drm_suspend(drm_dev);

-	return error;
+	return i915_drm_freeze(drm_dev);
 }

 const struct dev_pm_ops i915_pm_ops = {
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
	uint64_t vm_end;
	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
	int vm_vram_pt_nr;
+	uint64_t vram_sys_base;

	/* the mtrr covering the FB */
	int fb_mtrr;
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
			uint32_t flags, uint64_t phys)
 {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj **pgt;
-	unsigned psz, pfl, pages;
-
-	if (virt >= dev_priv->vm_gart_base &&
-	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
-		psz = 12;
-		pgt = &dev_priv->gart_info.sg_ctxdma;
-		pfl = 0x21;
-		virt -= dev_priv->vm_gart_base;
-	} else
-	if (virt >= dev_priv->vm_vram_base &&
-	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
-		psz = 16;
-		pgt = dev_priv->vm_vram_pt;
-		pfl = 0x01;
-		virt -= dev_priv->vm_vram_base;
-	} else {
-		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
-			 virt, virt + size - 1);
-		return -EINVAL;
-	}
-
-	pages = size >> psz;
+	struct nouveau_gpuobj *pgt;
+	unsigned block;
+	int i;
+
+	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+	size = (size >> 16) << 1;
+
+	phys |= ((uint64_t)flags << 32);
+	phys |= 1;
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}

	dev_priv->engine.instmem.prepare_access(dev, true);
-	if (flags & 0x80000000) {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-
-			nv_wo32(dev, pt, pte++, 0x00000000);
-			nv_wo32(dev, pt, pte++, 0x00000000);
-
-			virt += (1 << psz);
-		}
-	} else {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-			unsigned offset_h = upper_32_bits(phys) & 0xff;
-			unsigned offset_l = lower_32_bits(phys);
-
-			nv_wo32(dev, pt, pte++, offset_l | pfl);
-			nv_wo32(dev, pt, pte++, offset_h | flags);
-
-			phys += (1 << psz);
-			virt += (1 << psz);
+	while (size) {
+		unsigned offset_h = upper_32_bits(phys);
+		unsigned offset_l = lower_32_bits(phys);
+		unsigned pte, end;
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 1);
+			if (size >= block && !(virt & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
+
+		phys += block << 15;
+		size -= block;
+
+		while (block) {
+			pgt = dev_priv->vm_vram_pt[virt >> 14];
+			pte = virt & 0x3ffe;
+
+			end = pte + block;
+			if (end > 16384)
+				end = 16384;
+			block -= (end - pte);
+			virt += (end - pte);
+
+			while (pte < end) {
+				nv_wo32(dev, pgt, pte++, offset_l);
+				nv_wo32(dev, pgt, pte++, offset_h);
+			}
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pgt;
+	unsigned pages, pte, end;
+
+	virt -= dev_priv->vm_vram_base;
+	pages = (size >> 16) << 1;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pages) {
+		pgt = dev_priv->vm_vram_pt[virt >> 29];
+		pte = (virt & 0x1ffe0000ULL) >> 15;
+
+		end = pte + pages;
+		if (end > 16384)
+			end = 16384;
+		pages -= (end - pte);
+		virt += (end - pte) << 15;
+
+		while (pte < end)
+			nv_wo32(dev, pgt, pte++, 0);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+	}
 }

 /*
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
					 struct drm_connector *connector)
 {
	struct drm_device *dev = encoder->dev;
-	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
	uint8_t saved_palette0[3], saved_palette_mask;
	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
	int i;

@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
	/* only implemented for head A for now */
	NVSetOwner(dev, 0);

+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);

@@ -203,6 +206,7 @@ out:
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);

	if (blue == 0x18) {
		NV_INFO(dev, "Load detected on head A\n");
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
	for (i = 0x1700; i <= 0x1710; i += 4)
		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);

+	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+	else
+		dev_priv->vram_sys_base = 0;
+
	/* Reserve the last MiB of VRAM, we should probably try to avoid
	 * setting up the below tables over the top of the VBIOS image at
	 * some point.

@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
	 * We map the entire fake channel into the start of the PRAMIN BAR
	 */
	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
				     0, &priv->pramin_pt);
	if (ret)
		return ret;

-	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-		if (v < (c_offset + c_size))
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-		else
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+	v = c_offset | 1;
+	if (dev_priv->vram_sys_base) {
+		v += dev_priv->vram_sys_base;
+		v |= 0x30;
+	}
+
+	i = 0;
+	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		v += 0x1000;
+		i += 8;
+	}
+
+	while (i < pt_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		i += 8;
	}

	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);

@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end, vram;
+	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+	uint32_t pte, pte_end;
+	uint64_t vram;

	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
		return -EINVAL;

@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);

-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
	vram    = gpuobj->im_backing_start;

	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
		 gpuobj->im_pramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);

+	vram |= 1;
+	if (dev_priv->vram_sys_base) {
+		vram += dev_priv->vram_sys_base;
+		vram |= 0x30;
+	}
+
	dev_priv->engine.instmem.prepare_access(dev, true);
	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));

-		pte += 8;
		vram += NV50_INSTMEM_PAGE_SIZE;
	}
	dev_priv->engine.instmem.finish_access(dev);

@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
	if (gpuobj->im_bound == 0)
		return -EINVAL;

-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;

	dev_priv->engine.instmem.prepare_access(dev, true);
	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-		pte += 8;
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
	}
	dev_priv->engine.instmem.finish_access(dev);
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }

-static int vmw_cmd_dma(struct vmw_private *dev_priv,
-		       struct vmw_sw_context *sw_context,
-		       SVGA3dCmdHeader *header)
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGAGuestPtr *ptr,
+				   struct vmw_dma_buffer **vmw_bo_p)
 {
-	uint32_t handle;
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
-	struct vmw_surface *srf = NULL;
-	struct vmw_dma_cmd {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSurfaceDMA dma;
-	} *cmd;
+	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
-	int ret;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
+	int ret;

-	cmd = container_of(header, struct vmw_dma_cmd, header);
-	handle = cmd->dma.guest.ptr.gmrId;
	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");

@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-		DRM_ERROR("Max number of DMA commands per submission"
+		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
-	reloc->location = &cmd->dma.guest.ptr;
+	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {

@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_p = NULL;
+	return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	struct vmw_surface *srf = NULL;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->dma.guest.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {

@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input)
	struct input_polled_dev *dev = input_get_drvdata(input);

	cancel_delayed_work_sync(&dev->work);
+	/*
+	 * Clean up work struct to remove references to the workqueue.
+	 * It may be destroyed by the next call. This causes problems
+	 * at next device open-close in case of poll_interval == 0.
+	 */
+	INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
	input_polldev_stop_workqueue();

	if (dev->close)
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
 static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 {
-	dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ;
-	dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ;
+	dev->x = (pkt[2] << 8) | pkt[1];
+	dev->y = (pkt[4] << 8) | pkt[3];
	dev->press = pkt[5] & 0xff;
	dev->touch = pkt[0] & 0x01;

@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
	[DEVTYPE_GENERAL_TOUCH] = {
		.min_xc		= 0x0,
-		.max_xc		= 0x0500,
+		.max_xc		= 0x7fff,
		.min_yc		= 0x0,
-		.max_yc		= 0x0500,
+		.max_yc		= 0x7fff,
		.rept_size	= 7,
		.read_data	= general_touch_read_data,
	},
@@ -4006,11 +4006,21 @@ check_page:
		}
	}

-	if (!buffer_info->dma)
+	if (!buffer_info->dma) {
		buffer_info->dma = pci_map_page(pdev,
						buffer_info->page, 0,
						buffer_info->length,
						PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+			put_page(buffer_info->page);
+			dev_kfree_skb(skb);
+			buffer_info->page = NULL;
+			buffer_info->skb = NULL;
+			buffer_info->dma = 0;
+			adapter->alloc_rx_buff_failed++;
+			break; /* while !buffer_info->skb */
+		}
+	}

	rx_desc = E1000_RX_DESC(*rx_ring, i);
	rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

@@ -4101,6 +4111,13 @@ map_skb:
						  skb->data,
						  buffer_info->length,
						  PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+			dev_kfree_skb(skb);
+			buffer_info->skb = NULL;
+			buffer_info->dma = 0;
+			adapter->alloc_rx_buff_failed++;
+			break; /* while !buffer_info->skb */
+		}

		/*
		 * XXX if it was allocated cleanly it will never map to a
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
		/* Transmit complete. */
		lp->lstats.tx_ints++;
		tc35815_txdone(dev);
-		netif_wake_queue(dev);
		if (ret < 0)
			ret = 0;
	}
@@ -583,6 +583,11 @@ static const struct usb_device_id products [] = {
	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Ericsson C3607w ver 2 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
 }, {
	/* Toshiba F3507g */
	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
					   "%d index %d\n", scd_ssn , index);
			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&

@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
			   scd_ssn , index, txq_id, txq->swq_id);

		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&

@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
			   tx_resp->failure_frame);

		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-		if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
			iwl_wake_queue(priv, txq_id);
	}

-	if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-		iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+	iwl_txq_check_empty(priv, sta_id, tid, txq_id);

	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
@@ -2744,8 +2744,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
		if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
			priv->staging_rxon.flags = 0;

-		iwl_set_rxon_ht(priv, ht_conf);
		iwl_set_rxon_channel(priv, conf->channel);
+		iwl_set_rxon_ht(priv, ht_conf);

		iwl_set_flags_for_band(priv, conf->channel->band);
		spin_unlock_irqrestore(&priv->lock, flags);
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
 int iwl_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq);
 int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);


+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed)
+{
+	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
+			priv->stations[sta_id].tid[tid].tfds_in_queue,
+			freed);
+		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
+	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "

@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)

		tx_info = &txq->txb[txq->q.read_ptr];
		iwl_tx_status(priv, tx_info->skb[0]);
+
+		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-		nfreed++;
	}
	return nfreed;
 }
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
	acer_backlight_device = bd;

	bd->props.power = FB_BLANK_UNBLANK;
-	bd->props.brightness = max_brightness;
+	bd->props.brightness = read_brightness(bd);
	bd->props.max_brightness = max_brightness;
	backlight_update_status(bd);
	return 0;
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
	return 0;
 }

+static void efifb_destroy(struct fb_info *info)
+{
+	if (info->screen_base)
+		iounmap(info->screen_base);
+	release_mem_region(info->aperture_base, info->aperture_size);
+	framebuffer_release(info);
+}
+
 static struct fb_ops efifb_ops = {
	.owner = THIS_MODULE,
+	.fb_destroy = efifb_destroy,
	.fb_setcolreg = efifb_setcolreg,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,

@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
	info->par = NULL;

	info->aperture_base = efifb_fix.smem_start;
-	info->aperture_size = size_total;
+	info->aperture_size = size_remap;

	info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
	if (!info->screen_base) {
@@ -1,9 +1,8 @@
 /*
  * Blackfin On-Chip Watchdog Driver
- *  Supports BF53[123]/BF53[467]/BF54[2489]/BF561
  *
  * Originally based on softdog.c
- * Copyright 2006-2007 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
  * Copyright 2006-2007 Michele d'Amico
  * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
  *

@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
  */
 static int bfin_wdt_set_timeout(unsigned long t)
 {
-	u32 cnt;
+	u32 cnt, max_t, sclk;
	unsigned long flags;

-	stampit();
+	sclk = get_sclk();
+	max_t = -1 / sclk;
+	cnt = t * sclk;
+	stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);

-	cnt = t * get_sclk();
-	if (cnt < get_sclk()) {
+	if (t > max_t) {
		printk(KERN_WARNING PFX "timeout value is too large\n");
		return -EINVAL;
	}
@@ -637,7 +637,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
-	rlim_stack = min(rlim_stack, stack_size);
 #ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
@@ -461,9 +461,8 @@ struct request_queue
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
-#define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
-#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES   18	/* No extended merges */
+#define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */

 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_CLUSTER) |		\

@@ -587,7 +586,6 @@ enum {

 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
 	task_event->event_id.tid = perf_event_tid(event, task);
 	task_event->event_id.ptid = perf_event_tid(event, current);

-	task_event->event_id.time = perf_clock();
-
 	perf_output_put(&handle, task_event->event_id);

 	perf_output_end(&handle);
@@ -3268,7 +3266,7 @@ static void perf_event_task_output(struct perf_event *event,

 static int perf_event_task_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;

 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3300,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
 	if (!ctx)
-		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+		ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
 	put_cpu_var(perf_cpu_context);
@@ -3331,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
 			/* .ppid */
 			/* .tid  */
 			/* .ptid */
+			.time = perf_clock(),
 		},
 	};

@@ -3380,7 +3379,7 @@ static void perf_event_comm_output(struct perf_event *event,

 static int perf_event_comm_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;

 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3500,7 +3499,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;

 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	if (which > PRIO_USER || which < PRIO_PROCESS)
 		return -EINVAL;

+	rcu_read_lock();
 	read_lock(&tasklist_lock);
 	switch (which) {
 	case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	}
 out_unlock:
 	read_unlock(&tasklist_lock);
+	rcu_read_unlock();

 	return retval;
 }
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

 			/* if already at the top layer, we need to grow */
-			if (!(p = pa[l])) {
+			if (id >= 1 << (idp->layers * IDR_BITS)) {
 				*starting_id = id;
 				return IDR_NEED_TO_GROW;
 			}
+			p = pa[l];
+			BUG_ON(!p);

 			/* If we need to go up one layer, continue the
 			 * loop; otherwise, restart from the top.
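The new condition makes growth depend on the id value rather than on hitting a NULL slot: a tree with the current number of layers can only address ids below 1 << (layers * IDR_BITS), so any candidate at or beyond that bound must return IDR_NEED_TO_GROW. A tiny illustration with made-up constants follows; IDR_BITS is assumed to be 5 here, as on 32-bit builds.

	/* Illustration of the grow check with example numbers; IDR_BITS is
	 * assumed to be 5 (the 32-bit value), so 2 layers cover ids 0..1023. */
	#include <stdio.h>

	#define IDR_BITS 5

	int main(void)
	{
		int layers = 2;		/* tree currently covers ids 0..1023 */
		int id = 1024;		/* first id outside that range */

		if (id >= 1 << (layers * IDR_BITS))
			printf("id %d requires the idr tree to grow\n", id);
		return 0;
	}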
36
mm/migrate.c
@@ -1002,33 +1002,27 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 #define DO_PAGES_STAT_CHUNK_NR 16
 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
-	unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
-	int err;

-	for (i = 0; i < nr_pages; i += chunk_nr) {
-		if (chunk_nr > nr_pages - i)
-			chunk_nr = nr_pages - i;
+	while (nr_pages) {
+		unsigned long chunk_nr;

-		err = copy_from_user(chunk_pages, &pages[i],
-				     chunk_nr * sizeof(*chunk_pages));
-		if (err) {
-			err = -EFAULT;
-			goto out;
-		}
+		chunk_nr = nr_pages;
+		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
+			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+
+		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
+			break;

 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

-		err = copy_to_user(&status[i], chunk_status,
-				   chunk_nr * sizeof(*chunk_status));
-		if (err) {
-			err = -EFAULT;
-			goto out;
-		}
+		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+			break;
+
+		pages += chunk_nr;
+		status += chunk_nr;
+		nr_pages -= chunk_nr;
 	}
-	err = 0;
-
-out:
-	return err;
+	return nr_pages ? -EFAULT : 0;
 }

 /*
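The rewritten loop replaces index bookkeeping and a shared err/goto path with a simpler pattern: take up to a fixed-size chunk, break out on any copy failure, advance the cursors, and derive the return code from whether work was left over. A rough user-space sketch of the same pattern follows; process_chunk() and the plain memcpy are placeholders, not the kernel's copy_*_user helpers.

	/* User-space sketch of the chunked-processing loop used above.
	 * process_chunk() stands in for do_pages_stat_array(); all names
	 * here are illustrative only. */
	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	#define CHUNK_NR 16

	static int process_chunk(const int *in, int *out, size_t n)
	{
		memcpy(out, in, n * sizeof(*out));	/* pretend each entry was "looked up" */
		return 0;
	}

	static int process_all(const int *pages, int *status, size_t nr_pages)
	{
		while (nr_pages) {
			size_t chunk_nr = nr_pages;

			if (chunk_nr > CHUNK_NR)
				chunk_nr = CHUNK_NR;
			if (process_chunk(pages, status, chunk_nr) != 0)
				break;			/* mirrors the break on copy failure */

			pages += chunk_nr;
			status += chunk_nr;
			nr_pages -= chunk_nr;
		}
		return nr_pages ? -1 : 0;		/* mirrors: return nr_pages ? -EFAULT : 0; */
	}

	int main(void)
	{
		int pages[40], status[40];
		size_t i;

		for (i = 0; i < 40; i++)
			pages[i] = (int)i;
		printf("rc = %d\n", process_all(pages, status, 40));
		return 0;
	}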
@@ -459,6 +459,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	list_for_each_entry(c, &p->children, sibling) {
 		if (c->mm == p->mm)
 			continue;
+		if (mem && !task_in_mem_cgroup(c, mem))
+			continue;
 		if (!oom_kill_task(c))
 			return 0;
 	}
@@ -2761,7 +2761,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
 	switch (ret) {
 	case GRO_NORMAL:
 	case GRO_HELD:
-		skb->protocol = eth_type_trans(skb, napi->dev);
+		skb->protocol = eth_type_trans(skb, skb->dev);

 		if (ret == GRO_HELD)
 			skb_gro_pull(skb, -ETH_HLEN);
@@ -63,12 +63,11 @@ int ima_inode_alloc(struct inode *inode)
 	spin_lock(&ima_iint_lock);
 	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
 	spin_unlock(&ima_iint_lock);
+	radix_tree_preload_end();
 out:
 	if (rc < 0)
 		kmem_cache_free(iint_cache, iint);

-	radix_tree_preload_end();
-
 	return rc;
 }

@@ -272,6 +272,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 	int ret;

 	pp->probes[0] = buf = zalloc(MAX_CMDLEN);
+	pp->found = 1;
 	if (!buf)
 		die("Failed to allocate memory by zalloc.");
 	if (pp->offset) {
@@ -294,6 +295,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 error:
 		free(pp->probes[0]);
 		pp->probes[0] = NULL;
+		pp->found = 0;
 	}
 	return ret;
 }
@@ -455,6 +457,7 @@ void show_perf_probe_events(void)
 	struct strlist *rawlist;
 	struct str_node *ent;

+	memset(&pp, 0, sizeof(pp));
 	fd = open_kprobe_events(O_RDONLY, 0);
 	rawlist = get_trace_kprobe_event_rawlist(fd);
 	close(fd);