Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource

Conflicts:
	arch/ia64/kernel/cyclone.c
	arch/mips/kernel/i8253.c
	arch/x86/kernel/i8253.c

Reason: Resolve conflicts so further cleanups do not conflict further

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit a18f22a968
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2011-05-14 12:06:36 +02:00

10701 changed files with 620585 additions and 295266 deletions


@ -233,6 +233,7 @@ header-y += magic.h
header-y += major.h
header-y += map_to_7segment.h
header-y += matroxfb.h
header-y += media.h
header-y += mempolicy.h
header-y += meye.h
header-y += mii.h
@ -276,6 +277,7 @@ header-y += nfsacl.h
header-y += nl80211.h
header-y += nubus.h
header-y += nvram.h
header-y += omap3isp.h
header-y += omapfb.h
header-y += oom.h
header-y += param.h
@ -370,6 +372,8 @@ header-y += unistd.h
header-y += usbdevice_fs.h
header-y += utime.h
header-y += utsname.h
header-y += v4l2-mediabus.h
header-y += v4l2-subdev.h
header-y += veth.h
header-y += vhost.h
header-y += videodev2.h


@ -10,7 +10,6 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
return ioremap_cache(phys, size);
}
int acpi_os_map_generic_address(struct acpi_generic_address *addr);
void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
#endif


@ -7,6 +7,28 @@
#ifndef _AER_H_
#define _AER_H_
struct aer_header_log_regs {
unsigned int dw0;
unsigned int dw1;
unsigned int dw2;
unsigned int dw3;
};
struct aer_capability_regs {
u32 header;
u32 uncor_status;
u32 uncor_mask;
u32 uncor_severity;
u32 cor_status;
u32 cor_mask;
u32 cap_control;
struct aer_header_log_regs header_log;
u32 root_command;
u32 root_status;
u16 cor_err_source;
u16 uncor_err_source;
};
#if defined(CONFIG_PCIEAER)
/* pci-e port driver needs this function to enable aer */
extern int pci_enable_pcie_error_reporting(struct pci_dev *dev);
@ -27,5 +49,7 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
#endif
extern void cper_print_aer(const char *prefix, int cper_severity,
struct aer_capability_regs *aer);
#endif //_AER_H_


@ -43,12 +43,12 @@ struct amba_id {
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, struct amba_id *);
int (*probe)(struct amba_device *, const struct amba_id *);
int (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
int (*suspend)(struct amba_device *, pm_message_t);
int (*resume)(struct amba_device *);
struct amba_id *id_table;
const struct amba_id *id_table;
};
enum amba_vendor {
@ -56,6 +56,10 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
};
extern struct bus_type amba_bustype;
#define to_amba_device(d) container_of(d, struct amba_device, dev)
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
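The hunk above constifies both the probe() id argument and the driver's id_table, so match tables can live in read-only memory. A minimal sketch of a driver declaration against the new interface; the pl999 names are invented:

/* Sketch: AMBA driver using the constified id table (names invented). */
static const struct amba_id pl999_ids[] = {
	{ .id = 0x00041999, .mask = 0x000fffff },
	{ 0, 0 },
};

static int pl999_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* id now points at const data and must not be written to */
	return 0;
}

static struct amba_driver pl999_driver = {
	.drv		= { .name = "pl999" },
	.probe		= pl999_probe,
	.id_table	= pl999_ids,
};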


@ -53,6 +53,7 @@
#define CNTL_LCDBPP8 (3 << 1)
#define CNTL_LCDBPP16 (4 << 1)
#define CNTL_LCDBPP16_565 (6 << 1)
#define CNTL_LCDBPP16_444 (7 << 1)
#define CNTL_LCDBPP24 (5 << 1)
#define CNTL_LCDBW (1 << 4)
#define CNTL_LCDTFT (1 << 5)
@ -66,6 +67,32 @@
#define CNTL_LDMAFIFOTIME (1 << 15)
#define CNTL_WATERMARK (1 << 16)
enum {
/* individual formats */
CLCD_CAP_RGB444 = (1 << 0),
CLCD_CAP_RGB5551 = (1 << 1),
CLCD_CAP_RGB565 = (1 << 2),
CLCD_CAP_RGB888 = (1 << 3),
CLCD_CAP_BGR444 = (1 << 4),
CLCD_CAP_BGR5551 = (1 << 5),
CLCD_CAP_BGR565 = (1 << 6),
CLCD_CAP_BGR888 = (1 << 7),
/* connection layouts */
CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
/* red/blue ordering */
CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
};
struct clcd_panel {
struct fb_videomode mode;
signed short width; /* width in mm */
@ -73,6 +100,7 @@ struct clcd_panel {
u32 tim2;
u32 tim3;
u32 cntl;
u32 caps;
unsigned int bpp:8,
fixedtimings:1,
grayscale:1;
@ -96,6 +124,11 @@ struct clcd_fb;
struct clcd_board {
const char *name;
/*
* Optional. Hardware capability flags.
*/
u32 caps;
/*
* Optional. Check whether the var structure is acceptable
* for this display.
@ -103,7 +136,7 @@ struct clcd_board {
int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var);
/*
* Compulsary. Decode fb->fb.var into regs->*. In the case of
* Compulsory. Decode fb->fb.var into regs->*. In the case of
* fixed timing, set regs->* to the register values required.
*/
void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs);
@ -155,34 +188,35 @@ struct clcd_fb {
static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
{
struct fb_var_screeninfo *var = &fb->fb.var;
u32 val, cpl;
/*
* Program the CLCD controller registers and start the CLCD
*/
val = ((fb->fb.var.xres / 16) - 1) << 2;
val |= (fb->fb.var.hsync_len - 1) << 8;
val |= (fb->fb.var.right_margin - 1) << 16;
val |= (fb->fb.var.left_margin - 1) << 24;
val = ((var->xres / 16) - 1) << 2;
val |= (var->hsync_len - 1) << 8;
val |= (var->right_margin - 1) << 16;
val |= (var->left_margin - 1) << 24;
regs->tim0 = val;
val = fb->fb.var.yres;
val = var->yres;
if (fb->panel->cntl & CNTL_LCDDUAL)
val /= 2;
val -= 1;
val |= (fb->fb.var.vsync_len - 1) << 10;
val |= fb->fb.var.lower_margin << 16;
val |= fb->fb.var.upper_margin << 24;
val |= (var->vsync_len - 1) << 10;
val |= var->lower_margin << 16;
val |= var->upper_margin << 24;
regs->tim1 = val;
val = fb->panel->tim2;
val |= fb->fb.var.sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
val |= fb->fb.var.sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
cpl = fb->fb.var.xres_virtual;
cpl = var->xres_virtual;
if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
/* / 1 */;
else if (!fb->fb.var.grayscale) /* STN color */
else if (!var->grayscale) /* STN color */
cpl = cpl * 8 / 3;
else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
cpl /= 8;
@ -194,10 +228,22 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
regs->tim3 = fb->panel->tim3;
val = fb->panel->cntl;
if (fb->fb.var.grayscale)
if (var->grayscale)
val |= CNTL_LCDBW;
switch (fb->fb.var.bits_per_pixel) {
if (fb->panel->caps && fb->board->caps &&
var->bits_per_pixel >= 16) {
/*
* if board and panel supply capabilities, we can support
* changing BGR/RGB depending on supplied parameters
*/
if (var->red.offset == 0)
val &= ~CNTL_BGR;
else
val |= CNTL_BGR;
}
switch (var->bits_per_pixel) {
case 1:
val |= CNTL_LCDBPP1;
break;
@ -212,15 +258,17 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
break;
case 16:
/*
* PL110 cannot choose between 5551 and 565 modes in
* its control register
* PL110 cannot choose between 5551 and 565 modes in its
* control register. It is possible to use 565 with
* custom external wiring.
*/
if ((fb->dev->periphid & 0x000fffff) == 0x00041110)
if (amba_part(fb->dev) == 0x110 ||
var->green.length == 5)
val |= CNTL_LCDBPP16;
else if (fb->fb.var.green.length == 5)
val |= CNTL_LCDBPP16;
else
else if (var->green.length == 6)
val |= CNTL_LCDBPP16_565;
else
val |= CNTL_LCDBPP16_444;
break;
case 32:
val |= CNTL_LCDBPP24;
@ -228,7 +276,7 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
}
regs->cntl = val;
regs->pixclock = fb->fb.var.pixclock;
regs->pixclock = var->pixclock;
}
static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
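With the caps mechanism above, a board declares which pixel layouts its wiring supports, and clcdfb_decode() then selects RGB vs BGR from var->red.offset whenever both board and panel advertise caps. A sketch of a board declaration reusing the generic helpers from this header (board name invented):

/* Sketch: a board wired for 5:6:5 in both orderings (name invented). */
static struct clcd_board my_clcd_board = {
	.name	= "my-board",
	.caps	= CLCD_CAP_565,		/* RGB565 and BGR565 reach the panel */
	.check	= clcdfb_check,		/* generic helper declared above */
	.decode	= clcdfb_decode,	/* generic helper shown above */
};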


@ -6,6 +6,9 @@
#include <linux/mmc/host.h>
/* Just some dummy forwarding */
struct dma_chan;
/**
* struct mmci_platform_data - platform configuration for the MMCI
* (also known as PL180) block.
@ -27,6 +30,17 @@
* @cd_invert: true if the gpio_cd pin value is active low
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
* @dma_filter: function used to select an appropriate RX and TX
* DMA channel to be used for DMA, if and only if you're deploying the
* generic DMA engine
* @dma_rx_param: parameter passed to the DMA allocation
* filter in order to select an appropriate RX channel. If
* there is a bidirectional RX+TX channel, then just specify
* this and leave dma_tx_param set to NULL
* @dma_tx_param: parameter passed to the DMA allocation
* filter in order to select an appropriate TX channel. If this
* is NULL the driver will attempt to use the RX channel as a
* bidirectional channel
*/
struct mmci_platform_data {
unsigned int f_max;
@ -38,6 +52,9 @@ struct mmci_platform_data {
int gpio_cd;
bool cd_invert;
unsigned long capabilities;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
};
#endif
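The new dma_filter/dma_rx_param/dma_tx_param fields hook MMCI into dmaengine channel allocation. A hedged sketch of platform data for a board with one bidirectional channel; the filter logic and the channel number are invented placeholders:

/* Sketch: MMCI platform data using the dmaengine hooks (values invented). */
static bool my_dma_filter(struct dma_chan *chan, void *filter_param)
{
	/* match the channel this platform dedicates to MMCI */
	return chan->chan_id == (int)(unsigned long)filter_param;
}

static struct mmci_platform_data my_mmci_data = {
	.f_max		= 100000000,
	.dma_filter	= my_dma_filter,
	.dma_rx_param	= (void *)3,	/* one bidirectional RX+TX channel */
	.dma_tx_param	= NULL,		/* so TX reuses the RX channel */
};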


@ -89,6 +89,7 @@ enum {
ATA_ID_SPG = 98,
ATA_ID_LBA_CAPACITY_2 = 100,
ATA_ID_SECTOR_SIZE = 106,
ATA_ID_WWN = 108,
ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */
ATA_ID_LAST_LUN = 126,
ATA_ID_DLF = 128,
@ -103,6 +104,7 @@ enum {
ATA_ID_SERNO_LEN = 20,
ATA_ID_FW_REV_LEN = 8,
ATA_ID_PROD_LEN = 40,
ATA_ID_WWN_LEN = 8,
ATA_PCI_CTL_OFS = 2,
@ -598,42 +600,42 @@ static inline bool ata_id_has_dipm(const u16 *id)
}
static inline int ata_id_has_fua(const u16 *id)
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_CFSSE] & (1 << 6);
}
static inline int ata_id_has_flush(const u16 *id)
static inline bool ata_id_has_flush(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
}
static inline int ata_id_flush_enabled(const u16 *id)
static inline bool ata_id_flush_enabled(const u16 *id)
{
if (ata_id_has_flush(id) == 0)
return 0;
return false;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
}
static inline int ata_id_has_flush_ext(const u16 *id)
static inline bool ata_id_has_flush_ext(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
}
static inline int ata_id_flush_ext_enabled(const u16 *id)
static inline bool ata_id_flush_ext_enabled(const u16 *id)
{
if (ata_id_has_flush_ext(id) == 0)
return 0;
return false;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return false;
/*
* some Maxtor disks have bit 13 defined incorrectly
* so check bit 10 too
@ -686,64 +688,64 @@ static inline u16 ata_id_logical_sector_offset(const u16 *id,
return 0;
}
static inline int ata_id_has_lba48(const u16 *id)
static inline bool ata_id_has_lba48(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
return 0;
return false;
if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2))
return 0;
return false;
return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
}
static inline int ata_id_lba48_enabled(const u16 *id)
static inline bool ata_id_lba48_enabled(const u16 *id)
{
if (ata_id_has_lba48(id) == 0)
return 0;
return false;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
}
static inline int ata_id_hpa_enabled(const u16 *id)
static inline bool ata_id_hpa_enabled(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
return 0;
return false;
/* And 87 covers 85-87 */
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return false;
/* Check command sets enabled as well as supported */
if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0)
return 0;
return false;
return id[ATA_ID_COMMAND_SET_1] & (1 << 10);
}
static inline int ata_id_has_wcache(const u16 *id)
static inline bool ata_id_has_wcache(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_COMMAND_SET_1] & (1 << 5);
}
static inline int ata_id_has_pm(const u16 *id)
static inline bool ata_id_has_pm(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_COMMAND_SET_1] & (1 << 3);
}
static inline int ata_id_rahead_enabled(const u16 *id)
static inline bool ata_id_rahead_enabled(const u16 *id)
{
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_CFS_ENABLE_1] & (1 << 6);
}
static inline int ata_id_wcache_enabled(const u16 *id)
static inline bool ata_id_wcache_enabled(const u16 *id)
{
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return false;
return id[ATA_ID_CFS_ENABLE_1] & (1 << 5);
}
@ -773,7 +775,7 @@ static inline unsigned int ata_id_major_version(const u16 *id)
return mver;
}
static inline int ata_id_is_sata(const u16 *id)
static inline bool ata_id_is_sata(const u16 *id)
{
/*
* See if word 93 is 0 AND drive is at least ATA-5 compatible
@ -782,37 +784,40 @@ static inline int ata_id_is_sata(const u16 *id)
* 0x0000 and 0xffff along with the earlier ATA revisions...
*/
if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020)
return 1;
return 0;
return true;
return false;
}
static inline int ata_id_has_tpm(const u16 *id)
static inline bool ata_id_has_tpm(const u16 *id)
{
/* The TPM bits are only valid on ATA8 */
if (ata_id_major_version(id) < 8)
return 0;
return false;
if ((id[48] & 0xC000) != 0x4000)
return 0;
return false;
return id[48] & (1 << 0);
}
static inline int ata_id_has_dword_io(const u16 *id)
static inline bool ata_id_has_dword_io(const u16 *id)
{
/* ATA 8 reuses this flag for "trusted" computing */
if (ata_id_major_version(id) > 7)
return 0;
if (id[ATA_ID_DWORD_IO] & (1 << 0))
return 1;
return 0;
return false;
return id[ATA_ID_DWORD_IO] & (1 << 0);
}
static inline int ata_id_has_unload(const u16 *id)
static inline bool ata_id_has_unload(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
(id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
id[ATA_ID_CFSSE] & (1 << 13))
return 1;
return 0;
return true;
return false;
}
static inline bool ata_id_has_wwn(const u16 *id)
{
return (id[ATA_ID_CSF_DEFAULT] & 0xC100) == 0x4100;
}
static inline int ata_id_form_factor(const u16 *id)
@ -843,25 +848,25 @@ static inline int ata_id_rotation_rate(const u16 *id)
return val;
}
static inline int ata_id_has_trim(const u16 *id)
static inline bool ata_id_has_trim(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
(id[ATA_ID_DATA_SET_MGMT] & 1))
return 1;
return 0;
return true;
return false;
}
static inline int ata_id_has_zero_after_trim(const u16 *id)
static inline bool ata_id_has_zero_after_trim(const u16 *id)
{
/* DSM supported, deterministic read, and read zero after trim set */
if (ata_id_has_trim(id) &&
(id[ATA_ID_ADDITIONAL_SUPP] & 0x4020) == 0x4020)
return 1;
return true;
return 0;
return false;
}
static inline int ata_id_current_chs_valid(const u16 *id)
static inline bool ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
has not been issued to the device then the values of
@ -873,11 +878,11 @@ static inline int ata_id_current_chs_valid(const u16 *id)
id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */
}
static inline int ata_id_is_cfa(const u16 *id)
static inline bool ata_id_is_cfa(const u16 *id)
{
if ((id[ATA_ID_CONFIG] == 0x848A) || /* Traditional CF */
(id[ATA_ID_CONFIG] == 0x844A)) /* Delkin Devices CF */
return 1;
return true;
/*
* CF specs don't require specific value in the word 0 anymore and yet
* they forbid to report the ATA version in the word 80 and require the
@ -886,44 +891,40 @@ static inline int ata_id_is_cfa(const u16 *id)
* and while those that don't indicate CFA feature support need some
* sort of quirk list, it seems impractical for the ones that do...
*/
if ((id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004)
return 1;
return 0;
return (id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004;
}
static inline int ata_id_is_ssd(const u16 *id)
static inline bool ata_id_is_ssd(const u16 *id)
{
return id[ATA_ID_ROT_SPEED] == 0x01;
}
static inline int ata_id_pio_need_iordy(const u16 *id, const u8 pio)
static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio)
{
/* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */
if (pio > 4 && ata_id_is_cfa(id))
return 0;
return false;
/* For PIO3 and higher it is mandatory. */
if (pio > 2)
return 1;
return true;
/* Turn it on when possible. */
if (ata_id_has_iordy(id))
return 1;
return 0;
return ata_id_has_iordy(id);
}
static inline int ata_drive_40wire(const u16 *dev_id)
static inline bool ata_drive_40wire(const u16 *dev_id)
{
if (ata_id_is_sata(dev_id))
return 0; /* SATA */
return false; /* SATA */
if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000)
return 0; /* 80 wire */
return 1;
return false; /* 80 wire */
return true;
}
static inline int ata_drive_40wire_relaxed(const u16 *dev_id)
static inline bool ata_drive_40wire_relaxed(const u16 *dev_id)
{
if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000)
return 0; /* 80 wire */
return 1;
return false; /* 80 wire */
return true;
}
static inline int atapi_cdb_len(const u16 *dev_id)
@ -936,12 +937,12 @@ static inline int atapi_cdb_len(const u16 *dev_id)
}
}
static inline int atapi_command_packet_set(const u16 *dev_id)
static inline bool atapi_command_packet_set(const u16 *dev_id)
{
return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
}
static inline int atapi_id_dmadir(const u16 *dev_id)
static inline bool atapi_id_dmadir(const u16 *dev_id)
{
return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
}
@ -954,13 +955,13 @@ static inline int atapi_id_dmadir(const u16 *dev_id)
*
* It is called only once for each device.
*/
static inline int ata_id_is_lba_capacity_ok(u16 *id)
static inline bool ata_id_is_lba_capacity_ok(u16 *id)
{
unsigned long lba_sects, chs_sects, head, tail;
/* No non-LBA info .. so valid! */
if (id[ATA_ID_CYLS] == 0)
return 1;
return true;
lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
@ -975,13 +976,13 @@ static inline int ata_id_is_lba_capacity_ok(u16 *id)
id[ATA_ID_SECTORS] == 63 &&
(id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
(lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
return 1;
return true;
chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
/* perform a rough sanity check on lba_sects: within 10% is OK */
if (lba_sects - chs_sects < chs_sects/10)
return 1;
return true;
/* some drives have the word order reversed */
head = (lba_sects >> 16) & 0xffff;
@ -990,10 +991,10 @@ static inline int ata_id_is_lba_capacity_ok(u16 *id)
if (lba_sects - chs_sects < chs_sects/10) {
*(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
return 1; /* LBA capacity is (now) good */
return true; /* LBA capacity is (now) good */
}
return 0; /* LBA capacity value may be bad */
return false; /* LBA capacity value may be bad */
}
static inline void ata_id_to_hd_driveid(u16 *id)
@ -1051,19 +1052,19 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf)
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
static inline int ata_ok(u8 status)
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
== ATA_DRDY);
}
static inline int lba_28_ok(u64 block, u32 n_block)
static inline bool lba_28_ok(u64 block, u32 n_block)
{
/* check the ending block number: must be LESS THAN 0x0fffffff */
return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
}
static inline int lba_48_ok(u64 block, u32 n_block)
static inline bool lba_48_ok(u64 block, u32 n_block)
{
/* check the ending block number */
return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
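This file's changes are mostly a mechanical int-to-bool conversion of the ata_id_*() predicates; their truth values are unchanged, so callers need no updates. A small sketch of how the predicates combine when picking an addressing mode (the helper name is invented):

/* Sketch: choosing LBA48 vs LBA28 with the (now bool) predicates. */
static bool my_use_lba48(const u16 *id, u64 block, u32 n_block)
{
	if (ata_id_has_lba48(id) && !lba_28_ok(block, n_block) &&
	    lba_48_ok(block, n_block))
		return true;	/* command needs the 48-bit taskfile */
	return false;
}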


@ -443,6 +443,7 @@ void atm_dev_signal_change(struct atm_dev *dev, char signal);
void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
/*
* This is approximately the algorithm used by alloc_skb.


@ -103,6 +103,8 @@
#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */
#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */
#define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */


@ -66,8 +66,6 @@ struct backing_dev_info {
unsigned int capabilities; /* Device capabilities */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
void *unplug_io_data;
char *name;
@ -251,7 +249,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);
int writeback_in_progress(struct backing_dev_info *bdi);
@ -336,17 +333,4 @@ static inline int bdi_sched_wait(void *word)
return 0;
}
static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
struct page *page)
{
if (bdi && bdi->unplug_io_fn)
bdi->unplug_io_fn(bdi, page);
}
static inline void blk_run_address_space(struct address_space *mapping)
{
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
#endif /* _LINUX_BACKING_DEV_H */


@ -32,6 +32,13 @@ enum backlight_update_reason {
BACKLIGHT_UPDATE_SYSFS,
};
enum backlight_type {
BACKLIGHT_RAW = 1,
BACKLIGHT_PLATFORM,
BACKLIGHT_FIRMWARE,
BACKLIGHT_TYPE_MAX,
};
struct backlight_device;
struct fb_info;
@ -62,6 +69,8 @@ struct backlight_properties {
/* FB Blanking active? (values as for power) */
/* Due to be removed, please use (state & BL_CORE_FBBLANK) */
int fb_blank;
/* Backlight type */
enum backlight_type type;
/* Flags used to signal drivers of state changes */
/* Upper 4 bits are reserved for driver internal use */
unsigned int state;
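With the new type field, a driver declares what kind of control it implements before registering. A sketch assuming the five-argument backlight_device_register() that accepts properties at registration; the device names are invented:

/* Sketch: declaring the backlight type at registration (names invented). */
struct backlight_properties props;
struct backlight_device *bd;

memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;		/* direct brightness register access */
props.max_brightness = 255;
bd = backlight_device_register("my-panel", dev, priv, &my_backlight_ops,
			       &props);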

include/linux/bch.h (new file, 79 lines)

@ -0,0 +1,79 @@
/*
* Generic binary BCH encoding/decoding library
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Copyright © 2011 Parrot S.A.
*
* Author: Ivan Djelic <ivan.djelic@parrot.com>
*
* Description:
*
* This library provides runtime configurable encoding/decoding of binary
* Bose-Chaudhuri-Hocquenghem (BCH) codes.
*/
#ifndef _BCH_H
#define _BCH_H
#include <linux/types.h>
/**
* struct bch_control - BCH control structure
* @m: Galois field order
* @n: maximum codeword size in bits (= 2^m-1)
* @t: error correction capability in bits
* @ecc_bits: ecc exact size in bits, i.e. generator polynomial degree (<=m*t)
* @ecc_bytes: ecc max size (m*t bits) in bytes
* @a_pow_tab: Galois field GF(2^m) exponentiation lookup table
* @a_log_tab: Galois field GF(2^m) log lookup table
* @mod8_tab: remainder generator polynomial lookup tables
* @ecc_buf: ecc parity words buffer
* @ecc_buf2: ecc parity words buffer
* @xi_tab: GF(2^m) base for solving degree 2 polynomial roots
* @syn: syndrome buffer
* @cache: log-based polynomial representation buffer
* @elp: error locator polynomial
* @poly_2t: temporary polynomials of degree 2t
*/
struct bch_control {
unsigned int m;
unsigned int n;
unsigned int t;
unsigned int ecc_bits;
unsigned int ecc_bytes;
/* private: */
uint16_t *a_pow_tab;
uint16_t *a_log_tab;
uint32_t *mod8_tab;
uint32_t *ecc_buf;
uint32_t *ecc_buf2;
unsigned int *xi_tab;
unsigned int *syn;
int *cache;
struct gf_poly *elp;
struct gf_poly *poly_2t[4];
};
struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
void free_bch(struct bch_control *bch);
void encode_bch(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc);
int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc);
#endif /* _BCH_H */
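For the new BCH library, a hedged sketch of one encode/decode round trip; the parameters m=13, t=4 (a common choice for 512-byte NAND sectors) and the bit-repair loop are illustrative, not part of this header:

/* Sketch: BCH round trip; GF(2^13), up to 4 correctable bit errors. */
static int my_bch_roundtrip(uint8_t *data, unsigned int len)
{
	struct bch_control *bch = init_bch(13, 4, 0); /* 0 = default poly */
	uint8_t ecc[7] = { 0 };		/* ecc_bits = 13*4 = 52 -> 7 bytes */
	unsigned int errloc[4];
	int i, nerr;

	if (!bch)
		return -EINVAL;
	encode_bch(bch, data, len, ecc);
	/* ...data and ecc cross a noisy medium... */
	nerr = decode_bch(bch, data, len, ecc, NULL, NULL, errloc);
	for (i = 0; i < nerr; i++)	/* errloc[] holds bit offsets */
		data[errloc[i] / 8] ^= 1 << (errloc[i] % 8);
	free_bch(bch);
	return nerr < 0 ? nerr : 0;
}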


@ -304,7 +304,6 @@ struct biovec_slab {
};
extern struct bio_set *fs_bio_set;
extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
/*
* a small number of entries is fine, not going to be performance critical.


@ -23,11 +23,11 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
while (test_bit(bitnum, addr)) {
preempt_enable();
preempt_enable();
do {
cpu_relax();
preempt_disable();
}
} while (test_bit(bitnum, addr));
preempt_disable();
}
#endif
__acquire(bitlock);


@ -128,7 +128,6 @@ enum rq_flag_bits {
__REQ_NOIDLE, /* don't anticipate more IO after this one */
/* bio only flags */
__REQ_UNPLUG, /* unplug the immediately after submission */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
@ -148,9 +147,11 @@ enum rq_flag_bits {
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_FLUSH, /* request for cache flush */
__REQ_FLUSH_SEQ, /* request for flush sequence */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
__REQ_ON_PLUG, /* on plug list */
__REQ_NR_BITS, /* stops here */
};
@ -170,7 +171,6 @@ enum rq_flag_bits {
REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
#define REQ_THROTTLED (1 << __REQ_THROTTLED)
@ -188,8 +188,10 @@ enum rq_flag_bits {
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_FLUSH (1 << __REQ_FLUSH)
#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
#define REQ_SECURE (1 << __REQ_SECURE)
#define REQ_ON_PLUG (1 << __REQ_ON_PLUG)
#endif /* __LINUX_BLK_TYPES_H */


@ -108,11 +108,17 @@ struct request {
/*
* Three pointers are available for the IO schedulers, if they need
* more they have to dynamically allocate it.
* more they have to dynamically allocate it. Flush requests are
* never put on the IO scheduler. So let the flush fields share
* space with the three elevator_private pointers.
*/
void *elevator_private;
void *elevator_private2;
void *elevator_private3;
union {
void *elevator_private[3];
struct {
unsigned int seq;
struct list_head list;
} flush;
};
struct gendisk *rq_disk;
struct hd_struct *part;
@ -190,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
struct bvec_merge_data {
@ -273,7 +278,6 @@ struct request_queue
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
@ -287,12 +291,9 @@ struct request_queue
struct request *boundary_rq;
/*
* Auto-unplugging state
* Delayed queue handling
*/
struct timer_list unplug_timer;
int unplug_thresh; /* After this many requests */
unsigned long unplug_delay; /* After this many jiffies */
struct work_struct unplug_work;
struct delayed_work delay_work;
struct backing_dev_info backing_dev_info;
@ -363,11 +364,12 @@ struct request_queue
* for flush operations
*/
unsigned int flush_flags;
unsigned int flush_seq;
int flush_err;
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
struct request flush_rq;
struct request *orig_flush_rq;
struct list_head pending_flushes;
struct mutex sysfs_lock;
@ -386,21 +388,19 @@ struct request_queue
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@ -472,7 +472,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
__clear_bit(flag, &q->queue_flags);
}
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@ -667,9 +666,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
unsigned int, void __user *);
@ -699,8 +696,9 @@ extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@ -713,7 +711,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@ -850,7 +847,6 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);
int blk_get_queue(struct request_queue *);
@ -858,6 +854,44 @@ struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
struct blk_plug {
unsigned long magic;
struct list_head list;
struct list_head cb_list;
unsigned int should_sort;
};
struct blk_plug_cb {
struct list_head list;
void (*callback)(struct blk_plug_cb *);
};
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);
static inline void blk_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
if (plug)
blk_flush_plug_list(plug, false);
}
static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
if (plug)
blk_flush_plug_list(plug, true);
}
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}
/*
* tag stuff
*/
@ -1088,7 +1122,6 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*
@ -1136,8 +1169,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
extern void throtl_shutdown_timer_wq(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
{
@ -1146,8 +1177,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
#endif /* CONFIG_BLK_DEV_THROTTLING */
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
@ -1190,6 +1219,7 @@ struct blk_integrity {
struct kobject kobj;
};
extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
@ -1246,6 +1276,7 @@ queue_max_integrity_segments(struct request_queue *q)
#define queue_max_integrity_segments(a) (0)
#define blk_integrity_merge_rq(a, b, c) (0)
#define blk_integrity_merge_bio(a, b, c) (0)
#define blk_integrity_is_initialized(a) (0)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@ -1281,6 +1312,31 @@ static inline long nr_blockdev_pages(void)
return 0;
}
struct blk_plug {
};
static inline void blk_start_plug(struct blk_plug *plug)
{
}
static inline void blk_finish_plug(struct blk_plug *plug)
{
}
static inline void blk_flush_plug(struct task_struct *task)
{
}
static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
return false;
}
#endif /* CONFIG_BLOCK */
#endif
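The plugging rework above replaces the per-queue unplug machinery with an on-stack blk_plug owned by the submitting task. A hedged sketch of the intended usage pattern (function and variable names invented):

/* Sketch: batching submissions with the new on-stack plug. */
static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* current->plug = &plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* queued on the plug list */
	blk_finish_plug(&plug);			/* flushes the list out */
}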


@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
extern void blk_dump_cmd(char *buf, struct request *rq);
extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */


@ -19,10 +19,6 @@ extern unsigned long min_low_pfn;
*/
extern unsigned long max_pfn;
#ifdef CONFIG_CRASH_DUMP
extern unsigned long saved_max_pfn;
#endif
#ifndef CONFIG_NO_BOOTMEM
/*
* node_bootmem_map is a map pointer - the bits represent all physical


@ -219,7 +219,6 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,


@ -36,10 +36,10 @@
* @prot: pointer to struct proto structure.
*/
struct can_proto {
int type;
int protocol;
struct proto_ops *ops;
struct proto *prot;
int type;
int protocol;
const struct proto_ops *ops;
struct proto *prot;
};
/* function prototypes for the CAN networklayer core (af_can.c) */
@ -58,5 +58,6 @@ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif /* CAN_CORE_H */


@ -51,7 +51,7 @@
#define CAN_ERR_PROT_BIT1 0x10 /* unable to send recessive bit */
#define CAN_ERR_PROT_OVERLOAD 0x20 /* bus overload */
#define CAN_ERR_PROT_ACTIVE 0x40 /* active error announcement */
#define CAN_ERR_PROT_TX 0x80 /* error occured on transmission */
#define CAN_ERR_PROT_TX 0x80 /* error occurred on transmission */
/* error in CAN protocol (location) / data[3] */
#define CAN_ERR_PROT_LOC_UNSPEC 0x00 /* unspecified */


@ -17,7 +17,7 @@
/*
* CAN bit-timing parameters
*
* For futher information, please read chapter "8 BIT TIMING
* For further information, please read chapter "8 BIT TIMING
* REQUIREMENTS" of the "Bosch CAN Specification version 2.0"
* at http://www.semiconductors.bosch.de/pdf/can2spec.pdf.
*/


@ -12,6 +12,7 @@
/**
* struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
* @oscillator_frequency: - oscillator frequency in Hz
* @irq_flags: - IRQF configuration flags
* @board_specific_setup: - called before probing the chip (power,reset)
* @transceiver_enable: - called to power on/off the transceiver
* @power_enable: - called to power on/off the mcp *and* the
@ -24,6 +25,7 @@
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
unsigned long irq_flags;
int (*board_specific_setup)(struct spi_device *spi);
int (*transceiver_enable)(int enable);
int (*power_enable) (int enable);


@ -368,6 +368,15 @@ struct cpu_vfs_cap_data {
#ifdef __KERNEL__
struct dentry;
struct user_namespace;
struct user_namespace *current_user_ns(void);
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;
/*
* Internal kernel functions only
*/
@ -530,40 +539,27 @@ static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
cap_intersect(permitted, __cap_nfsd_set));
}
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;
extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool has_capability_noaudit(struct task_struct *t, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool task_ns_capable(struct task_struct *t, int cap);
/**
* has_capability - Determine if a task has a superior capability available
* @t: The task in question
* @cap: The capability to be tested for
* nsown_capable - Check superior capability to one's own user_ns
* @cap: The capability in question
*
* Return true if the specified task has the given superior capability
* currently in effect, false if not.
*
* Note that this does not set PF_SUPERPRIV on the task.
* Return true if the current task has the given superior capability
* targeted at its own user namespace.
*/
#define has_capability(t, cap) (security_real_capable((t), (cap)) == 0)
/**
* has_capability_noaudit - Determine if a task has a superior capability available (unaudited)
* @t: The task in question
* @cap: The capability to be tested for
*
* Return true if the specified task has the given superior capability
* currently in effect, false if not, but don't write an audit message for the
* check.
*
* Note that this does not set PF_SUPERPRIV on the task.
*/
#define has_capability_noaudit(t, cap) \
(security_real_capable_noaudit((t), (cap)) == 0)
extern int capable(int cap);
static inline bool nsown_capable(int cap)
{
return ns_capable(current_user_ns(), cap);
}
/* audit system wants to get cap info from files as well */
struct dentry;
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
#endif /* __KERNEL__ */
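nsown_capable() scopes a capability check to the caller's own user namespace instead of the initial one. A sketch of the pattern it is meant for (the helper is invented):

/* Sketch: an ownership-style check scoped to the caller's user_ns. */
static int my_allow_chown(void)
{
	if (!nsown_capable(CAP_CHOWN))
		return -EPERM;	/* plain capable() would demand CAP_CHOWN
				 * in the initial namespace instead */
	return 0;
}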


@ -149,7 +149,7 @@ typedef struct cdkhdr {
/*
* Define the memory mapping structure. This structure is pointed to by
* the memp field in the stlcdkhdr struct. As many as these structures
* as required are layed out in shared memory to define how the rest of
* as required are laid out in shared memory to define how the rest of
* shared memory is divided up. There will be one for each port.
*/
typedef struct cdkmem {


@ -67,12 +67,12 @@ struct ceph_auth_client {
bool negotiating; /* true if negotiating protocol */
const char *name; /* entity name */
u64 global_id; /* our unique id in system */
const char *secret; /* our secret key */
const struct ceph_crypto_key *key; /* our secret key */
unsigned want_keys; /* which services we want */
};
extern struct ceph_auth_client *ceph_auth_init(const char *name,
const char *secret);
const struct ceph_crypto_key *key);
extern void ceph_auth_destroy(struct ceph_auth_client *ac);
extern void ceph_auth_reset(struct ceph_auth_client *ac);


@ -136,9 +136,18 @@ struct ceph_dir_layout {
/* osd */
#define CEPH_MSG_OSD_MAP 41
#define CEPH_MSG_OSD_OP 42
#define CEPH_MSG_OSD_OPREPLY 43
#define CEPH_MSG_OSD_MAP 41
#define CEPH_MSG_OSD_OP 42
#define CEPH_MSG_OSD_OPREPLY 43
#define CEPH_MSG_WATCH_NOTIFY 44
/* watch-notify operations */
enum {
WATCH_NOTIFY = 1, /* notifying watcher */
WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */
};
/* pool operations */
enum {
@ -213,8 +222,10 @@ struct ceph_client_mount {
struct ceph_mon_request_header monhdr;
} __attribute__ ((packed));
#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */
struct ceph_mon_subscribe_item {
__le64 have_version; __le64 have;
__le64 have_version; __le64 have;
__u8 onetime;
} __attribute__ ((packed));


@ -61,7 +61,7 @@ struct ceph_options {
pointer type of args */
int num_mon;
char *name;
char *secret;
struct ceph_crypto_key *key;
};
/*
@ -71,7 +71,6 @@ struct ceph_options {
#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
#define CEPH_OSD_KEEPALIVE_DEFAULT 5
#define CEPH_OSD_IDLE_TTL_DEFAULT 60
#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)


@ -123,6 +123,7 @@ struct ceph_msg_pos {
#define SOCK_CLOSED 11 /* socket state changed to closed */
#define OPENING 13 /* open connection w/ (possibly new) peer */
#define DEAD 14 /* dead, about to kfree */
#define BACKOFF 15
/*
* A single connection with another host.
@ -160,7 +161,6 @@ struct ceph_connection {
struct list_head out_queue;
struct list_head out_sent; /* sending or sent but unacked */
u64 out_seq; /* last message queued for send */
bool out_keepalive_pending;
u64 in_seq, in_seq_acked; /* last message received, acked */


@ -32,6 +32,7 @@ struct ceph_osd {
struct rb_node o_node;
struct ceph_connection o_con;
struct list_head o_requests;
struct list_head o_linger_requests;
struct list_head o_osd_lru;
struct ceph_authorizer *o_authorizer;
void *o_authorizer_buf, *o_authorizer_reply_buf;
@ -47,6 +48,8 @@ struct ceph_osd_request {
struct rb_node r_node;
struct list_head r_req_lru_item;
struct list_head r_osd_item;
struct list_head r_linger_item;
struct list_head r_linger_osd;
struct ceph_osd *r_osd;
struct ceph_pg r_pgid;
int r_pg_osds[CEPH_PG_MAX_SIZE];
@ -59,6 +62,7 @@ struct ceph_osd_request {
int r_flags; /* any additional flags for the osd */
u32 r_sent; /* >0 if r_request is sending/sent */
int r_got_reply;
int r_linger;
struct ceph_osd_client *r_osdc;
struct kref r_kref;
@ -74,7 +78,6 @@ struct ceph_osd_request {
char r_oid[40]; /* object name */
int r_oid_len;
unsigned long r_stamp; /* send OR check time */
bool r_resend; /* msg send failed, needs retry */
struct ceph_file_layout r_file_layout;
struct ceph_snap_context *r_snapc; /* snap context for writes */
@ -90,6 +93,26 @@ struct ceph_osd_request {
struct ceph_pagelist *r_trail; /* trailing part of the data */
};
struct ceph_osd_event {
u64 cookie;
int one_shot;
struct ceph_osd_client *osdc;
void (*cb)(u64, u64, u8, void *);
void *data;
struct rb_node node;
struct list_head osd_node;
struct kref kref;
struct completion completion;
};
struct ceph_osd_event_work {
struct work_struct work;
struct ceph_osd_event *event;
u64 ver;
u64 notify_id;
u8 opcode;
};
struct ceph_osd_client {
struct ceph_client *client;
@ -104,7 +127,10 @@ struct ceph_osd_client {
u64 timeout_tid; /* tid of timeout triggering rq */
u64 last_tid; /* tid of last request */
struct rb_root requests; /* pending requests */
struct list_head req_lru; /* pending requests lru */
struct list_head req_lru; /* in-flight lru */
struct list_head req_unsent; /* unsent/need-resend queue */
struct list_head req_notarget; /* map to no osd */
struct list_head req_linger; /* lingering requests */
int num_requests;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
@ -116,6 +142,12 @@ struct ceph_osd_client {
struct ceph_msgpool msgpool_op;
struct ceph_msgpool msgpool_op_reply;
spinlock_t event_lock;
struct rb_root event_tree;
u64 event_count;
struct workqueue_struct *notify_wq;
};
struct ceph_osd_req_op {
@ -150,6 +182,13 @@ struct ceph_osd_req_op {
struct {
u64 snapid;
} snap;
struct {
u64 cookie;
u64 ver;
__u8 flag;
u32 prot_ver;
u32 timeout;
} watch;
};
u32 payload_len;
};
@ -198,6 +237,11 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
bool use_mempool, int num_reply,
int page_align);
extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
extern void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
static inline void ceph_osdc_get_request(struct ceph_osd_request *req)
{
kref_get(&req->r_kref);
@ -233,5 +277,14 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct page **pages, int nr_pages,
int flags, int do_sync, bool nofail);
/* watch/notify events */
extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
void (*event_cb)(u64, u64, u8, void *),
int one_shot, void *data,
struct ceph_osd_event **pevent);
extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
extern int ceph_osdc_wait_event(struct ceph_osd_event *event,
unsigned long timeout);
extern void ceph_osdc_put_event(struct ceph_osd_event *event);
#endif
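The watch/notify additions let a client register an event, reference its cookie in a CEPH_OSD_OP_NOTIFY request, and wait for the notification. A hedged sketch of the call sequence; the callback body, the timeout value, and its units are assumptions, not guarantees of this header:

/* Sketch: registering and waiting for a watch/notify event. */
static void my_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	/* runs from osdc->notify_wq when the notify message arrives */
}

static int my_wait_for_notify(struct ceph_osd_client *osdc)
{
	struct ceph_osd_event *event;
	int ret;

	ret = ceph_osdc_create_event(osdc, my_notify_cb, 1 /* one_shot */,
				     NULL, &event);
	if (ret)
		return ret;
	/* ...send a NOTIFY op carrying event->cookie... */
	ret = ceph_osdc_wait_event(event, 10);	/* timeout; units assumed
						 * to be seconds here */
	ceph_osdc_put_event(event);
	return ret;
}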


@ -12,9 +12,9 @@
* osdmap encoding versions
*/
#define CEPH_OSDMAP_INC_VERSION 5
#define CEPH_OSDMAP_INC_VERSION_EXT 5
#define CEPH_OSDMAP_INC_VERSION_EXT 6
#define CEPH_OSDMAP_VERSION 5
#define CEPH_OSDMAP_VERSION_EXT 5
#define CEPH_OSDMAP_VERSION_EXT 6
/*
* fs id
@ -181,9 +181,17 @@ enum {
/* read */
CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
CEPH_OSD_OP_MAPEXT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 3,
/* fancy read */
CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
CEPH_OSD_OP_SPARSE_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 5,
CEPH_OSD_OP_NOTIFY = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 6,
CEPH_OSD_OP_NOTIFY_ACK = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 7,
/* versioning */
CEPH_OSD_OP_ASSERT_VER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 8,
/* write */
CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
@ -205,6 +213,8 @@ enum {
CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14,
CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
/** attrs **/
/* read */
CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
@ -218,11 +228,14 @@ enum {
CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
/** subop **/
CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6,
CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8,
/** lock **/
CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
@ -328,6 +341,8 @@ enum {
CEPH_OSD_CMPXATTR_MODE_U64 = 2
};
#define RADOS_NOTIFY_VER 1
/*
* an individual object operation. each may be accompanied by some data
* payload
@ -359,7 +374,12 @@ struct ceph_osd_op {
struct {
__le64 snapid;
} __attribute__ ((packed)) snap;
};
struct {
__le64 cookie;
__le64 ver;
__u8 flag; /* 0 = unwatch, 1 = watch */
} __attribute__ ((packed)) watch;
};
__le32 payload_len;
} __attribute__ ((packed));
@ -402,4 +422,5 @@ struct ceph_osd_reply_head {
} __attribute__ ((packed));
#endif


@ -44,7 +44,7 @@ extern unsigned char * cfag12864b_buffer;
/*
* Get the refresh rate of the LCD
*
* Returns the refresh rate (hertzs).
* Returns the refresh rate (hertz).
*/
extern unsigned int cfag12864b_getrate(void);


@ -240,7 +240,7 @@ struct cgroup {
/* For RCU-protected deletion */
struct rcu_head rcu_head;
/* List of events which userspace want to recieve */
/* List of events which userspace want to receive */
struct list_head event_list;
spinlock_t event_list_lock;
};
@ -474,7 +474,8 @@ struct cgroup_subsys {
struct cgroup *old_cgrp, struct task_struct *tsk,
bool threadgroup);
void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup *old_cgrp, struct task_struct *task);
int (*populate)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
@ -626,6 +627,7 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
unsigned short css_depth(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
#else /* !CONFIG_CGROUPS */


@ -65,4 +65,8 @@ SUBSYS(net_cls)
SUBSYS(blkio)
#endif
#ifdef CONFIG_CGROUP_PERF
SUBSYS(perf)
#endif
/* */


@ -341,4 +341,6 @@ static inline void update_vsyscall_tz(void)
extern void timekeeping_notify(struct clocksource *clock);
extern int clocksource_i8253_init(void);
#endif /* _LINUX_CLOCKSOURCE_H */


@ -20,7 +20,7 @@ typedef struct atreq {
} atreq_t;
/* what is particularly stupid in the original driver is the arch-dependant
/* what is particularly stupid in the original driver is the arch-dependent
* member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
* will lay out the structure members differently than the 64bit kernel.
*


@ -11,9 +11,6 @@
/* The full zone was compacted */
#define COMPACT_COMPLETE 3
#define COMPACT_MODE_DIRECT_RECLAIM 0
#define COMPACT_MODE_KSWAPD 1
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
bool sync);
extern unsigned long compaction_suitable(struct zone *zone, int order);
extern unsigned long compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask, bool sync,
int compact_mode);
gfp_t gfp_mask, bool sync);
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
}
static inline unsigned long compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask, bool sync,
int compact_mode)
gfp_t gfp_mask, bool sync)
{
return COMPACT_CONTINUE;
}


@ -92,3 +92,11 @@
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
#define __always_inline inline __attribute__((always_inline))
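uninitialized_var(), consolidated here from the per-compiler-version headers below, expands to a self-assignment: it silences gcc's may-be-used-uninitialized false positives without emitting any code. A sketch of the canonical use (names invented):

/* Sketch: suppressing a false-positive warning (names invented). */
static void my_maybe_locked(spinlock_t *lock, bool need_lock)
{
	unsigned long uninitialized_var(flags);	/* = "flags = flags" */

	if (need_lock)
		spin_lock_irqsave(lock, flags);
	/* ...work... */
	if (need_lock)
		spin_unlock_irqrestore(lock, flags);
	/* gcc cannot prove the two branches pair up, so without the
	 * macro it would warn that flags may be used uninitialized */
}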


@ -21,11 +21,3 @@
# error "GCOV profiling support for gcc versions below 3.4 not included"
# endif /* __GNUC_MINOR__ */
#endif /* CONFIG_GCOV_KERNEL */
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
#define __always_inline inline __attribute__((always_inline))


@ -12,13 +12,6 @@
#define __used __attribute__((__used__))
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
#define __always_inline inline __attribute__((always_inline))
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
#if __GNUC_MINOR__ >= 3
/* Mark functions as cold. gcc will assume any path leading to a call
@ -53,7 +46,6 @@
#define __noclone __attribute__((__noclone__))
#endif
#endif
#if __GNUC_MINOR__ > 0


@ -218,7 +218,7 @@ static ssize_t _item##_attr_store(struct config_item *item, \
* group children. default_groups may coexist alongsize make_group() or
* make_item(), but if the group wishes to have only default_groups
* children (disallowing mkdir(2)), it need not provide either function.
* If the group has commit(), it supports pending and commited (active)
* If the group has commit(), it supports pending and committed (active)
* items.
*/
struct configfs_item_operations {


@ -42,8 +42,9 @@
#define CN_VAL_DM_USERSPACE_LOG 0x1
#define CN_IDX_DRBD 0x8
#define CN_VAL_DRBD 0x1
#define CN_KVP_IDX 0x9 /* HyperV KVP */
#define CN_NETLINK_USERS 8
#define CN_NETLINK_USERS 9
/*
* Maximum connector's message size.
@ -87,8 +88,6 @@ struct cn_queue_dev {
atomic_t refcnt;
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
struct list_head queue_list;
spinlock_t queue_lock;
@ -100,20 +99,13 @@ struct cn_callback_id {
struct cb_id id;
};
struct cn_callback_data {
struct sk_buff *skb;
void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
void *free;
};
struct cn_callback_entry {
struct list_head callback_entry;
struct work_struct work;
atomic_t refcnt;
struct cn_queue_dev *pdev;
struct cn_callback_id id;
struct cn_callback_data data;
void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
u32 seq, group;
};
@ -128,19 +120,21 @@ struct cn_dev {
struct cn_queue_dev *cbdev;
};
int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
int cn_add_callback(struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_del_callback(struct cb_id *);
int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
void cn_queue_release_callback(struct cn_callback_entry *);
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
int cn_cb_equal(struct cb_id *, struct cb_id *);
void cn_queue_wrapper(struct work_struct *work);
#endif /* __KERNEL__ */
#endif /* __CONNECTOR_H */
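A hedged sketch of registering a callback with the updated, const-qualified name parameter; the id values and my_* names are illustrative assumptions:

	#include <linux/connector.h>

	static struct cb_id my_cn_id = { .idx = 0x123, .val = 0x456 };

	static void my_cn_callback(struct cn_msg *msg,
				   struct netlink_skb_parms *nsp)
	{
		pr_debug("cn: seq %u ack %u len %u\n",
			 msg->seq, msg->ack, msg->len);
	}

	static int __init my_cn_init(void)
	{
		return cn_add_callback(&my_cn_id, "my_cn", my_cn_callback);
	}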

View file

@ -310,7 +310,7 @@ struct cper_sec_proc_ia {
__u8 cpuid[48];
};
/* IA32/X64 Processor Error Infomation Structure */
/* IA32/X64 Processor Error Information Structure */
struct cper_ia_err_info {
uuid_le err_type;
__u64 validation_bits;
@ -388,5 +388,7 @@ struct cper_sec_pcie {
#pragma pack()
u64 cper_next_record_id(void);
void cper_print_bits(const char *prefix, unsigned int bits,
const char *strs[], unsigned int strs_size);
#endif

73 include/linux/cpu_rmap.h Normal file
View file

@ -0,0 +1,73 @@
/*
* cpu_rmap.c: CPU affinity reverse-map support
* Copyright 2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/slab.h>
/**
* struct cpu_rmap - CPU affinity reverse-map
* @size: Number of objects to be reverse-mapped
* @used: Number of objects added
* @obj: Pointer to array of object pointers
* @near: For each CPU, the index and distance to the nearest object,
* based on affinity masks
*/
struct cpu_rmap {
u16 size, used;
void **obj;
struct {
u16 index;
u16 dist;
} near[0];
};
#define CPU_RMAP_DIST_INF 0xffff
extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
/**
* free_cpu_rmap - free CPU affinity reverse-map
* @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
*/
static inline void free_cpu_rmap(struct cpu_rmap *rmap)
{
kfree(rmap);
}
extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
const struct cpumask *affinity);
static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
{
return rmap->near[cpu].index;
}
static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
{
return rmap->obj[rmap->near[cpu].index];
}
#ifdef CONFIG_GENERIC_HARDIRQS
/**
* alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
* @size: Number of objects to be mapped
*
* Must be called in process context.
*/
static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
{
return alloc_cpu_rmap(size, GFP_KERNEL);
}
extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
#endif
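A usage sketch under assumed driver names (nr_rx_queues, rx_irq[]): allocate a reverse-map for the RX queues, register each queue's IRQ, then look up the queue nearest the executing CPU:

	struct cpu_rmap *rmap;
	int i, err;
	u16 queue;

	rmap = alloc_irq_cpu_rmap(nr_rx_queues);	/* process context */
	if (!rmap)
		return -ENOMEM;

	for (i = 0; i < nr_rx_queues; i++) {
		err = irq_cpu_rmap_add(rmap, rx_irq[i]);
		if (err)
			goto out;	/* out: free_irq_cpu_rmap(rmap) */
	}

	/* later, on a hot path: index of the queue nearest this CPU */
	queue = cpu_rmap_lookup_index(rmap, raw_smp_processor_id());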

View file

@ -230,7 +230,7 @@ struct cpufreq_driver {
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
int (*suspend) (struct cpufreq_policy *policy);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
};
@ -281,19 +281,10 @@ __ATTR(_name, 0444, show_##_name, NULL)
static struct freq_attr _name = \
__ATTR(_name, _perm, show_##_name, NULL)
#define cpufreq_freq_attr_ro_old(_name) \
static struct freq_attr _name##_old = \
__ATTR(_name, 0444, show_##_name##_old, NULL)
#define cpufreq_freq_attr_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
#define cpufreq_freq_attr_rw_old(_name) \
static struct freq_attr _name##_old = \
__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
struct global_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj,

View file

@ -11,7 +11,7 @@
extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)data, length)
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
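The added parentheses matter once the argument is an expression: a cast binds tighter than addition, so the old expansion applied the cast to the first operand only. A hedged illustration (buf32 and n are assumed locals):

	u32 crc = crc32(seed, buf32 + n, len);		/* buf32 is a u32 * */
	/* old: (unsigned char const *)buf32 + n   -- advances n bytes */
	/* new: (unsigned char const *)(buf32 + n) -- advances n u32 elements */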
/*
* Helpers for hash table generation of ethernet nics:

View file

@ -354,9 +354,11 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
#define current_user_ns() (current_cred_xxx(user)->user_ns)
#define _current_user_ns() (current_cred_xxx(user)->user_ns)
#define current_security() (current_cred_xxx(security))
extern struct user_namespace *current_user_ns(void);
#define current_uid_gid(_uid, _gid) \
do { \
const struct cred *__cred; \

View file

@ -36,6 +36,7 @@ struct emac_platform_data {
u8 rmii_en;
u8 version;
bool no_bd_ram;
void (*interrupt_enable) (void);
void (*interrupt_disable) (void);
};

View file

@ -168,7 +168,7 @@ struct dentry_operations {
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool, bool);
int (*d_manage)(struct dentry *, bool);
} ____cacheline_aligned;
/*
@ -197,7 +197,7 @@ struct dentry_operations {
* typically using d_splice_alias. */
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
#define DCACHE_UNHASHED 0x0010
#define DCACHE_RCUACCESS 0x0010 /* Entry has ever been RCU-visible */
#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020
/* Parent inode is watched by inotify */
@ -384,7 +384,7 @@ extern struct dentry *dget_parent(struct dentry *dentry);
static inline int d_unhashed(struct dentry *dentry)
{
return (dentry->d_flags & DCACHE_UNHASHED);
return hlist_bl_unhashed(&dentry->d_hash);
}
static inline int d_unlinked(struct dentry *dentry)
@ -416,7 +416,6 @@ static inline bool d_mountpoint(struct dentry *dentry)
return dentry->d_flags & DCACHE_MOUNTED;
}
extern struct vfsmount *lookup_mnt(struct path *);
extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
extern int sysctl_vfs_cache_pressure;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, Intel Corporation.
* Copyright (c) 2008-2011, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@ -25,9 +25,14 @@
/* IEEE 802.1Qaz std supported values */
#define IEEE_8021QAZ_MAX_TCS 8
#define IEEE_8021QAZ_TSA_STRICT 0
#define IEEE_8021QAZ_TSA_CB_SHAPER 1
#define IEEE_8021QAZ_TSA_ETS 2
#define IEEE_8021QAZ_TSA_VENDOR 255
/* This structure contains the IEEE 802.1Qaz ETS managed object
*
* @willing: willing bit in ETS configuratin TLV
* @willing: willing bit in ETS configuration TLV
* @ets_cap: indicates supported capacity of ets feature
* @cbs: credit based shaper ets algorithm supported
* @tc_tx_bw: tc tx bandwidth indexed by traffic class
@ -82,6 +87,50 @@ struct ieee_pfc {
__u64 indications[IEEE_8021QAZ_MAX_TCS];
};
/* CEE DCBX std supported values */
#define CEE_DCBX_MAX_PGS 8
#define CEE_DCBX_MAX_PRIO 8
/**
* struct cee_pg - CEE Priority-Group managed object
*
* @willing: willing bit in the PG tlv
* @error: error bit in the PG tlv
* @pg_en: enable bit of the PG feature
* @tcs_supported: number of traffic classes supported
* @pg_bw: bandwidth percentage for each priority group
* @prio_pg: priority to PG mapping indexed by priority
*/
struct cee_pg {
__u8 willing;
__u8 error;
__u8 pg_en;
__u8 tcs_supported;
__u8 pg_bw[CEE_DCBX_MAX_PGS];
__u8 prio_pg[CEE_DCBX_MAX_PGS];
};
/**
* struct cee_pfc - CEE PFC managed object
*
* @willing: willing bit in the PFC tlv
* @error: error bit in the PFC tlv
* @pfc_en: bitmap indicating pfc enabled traffic classes
* @tcs_supported: number of traffic classes supported
*/
struct cee_pfc {
__u8 willing;
__u8 error;
__u8 pfc_en;
__u8 tcs_supported;
};
/* IEEE 802.1Qaz std supported values */
#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
#define IEEE_8021QAZ_APP_SEL_STREAM 2
#define IEEE_8021QAZ_APP_SEL_DGRAM 3
#define IEEE_8021QAZ_APP_SEL_ANY 4
/* This structure contains the IEEE 802.1Qaz APP managed object. This
* object is also used for the CEE std as well. There is no difference
* between the objects.
@ -101,8 +150,22 @@ struct ieee_pfc {
*/
struct dcb_app {
__u8 selector;
__u32 protocol;
__u8 priority;
__u16 protocol;
};
/**
* struct dcb_peer_app_info - APP feature information sent by the peer
*
* @willing: willing bit in the peer APP tlv
* @error: error bit in the peer APP tlv
*
* In addition to this information the full peer APP tlv also contains
* a table of 'app_count' APP objects defined above.
*/
struct dcb_peer_app_info {
__u8 willing;
__u8 error;
};
struct dcbmsg {
@ -139,6 +202,7 @@ struct dcbmsg {
* @DCB_CMD_SDCBX: set DCBX engine configuration
* @DCB_CMD_GFEATCFG: get DCBX features flags
* @DCB_CMD_SFEATCFG: set DCBX features negotiation flags
* @DCB_CMD_CEE_GET: get CEE aggregated configuration
*/
enum dcbnl_commands {
DCB_CMD_UNDEFINED,
@ -181,6 +245,8 @@ enum dcbnl_commands {
DCB_CMD_GFEATCFG,
DCB_CMD_SFEATCFG,
DCB_CMD_CEE_GET,
__DCB_CMD_ENUM_MAX,
DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
};
@ -203,6 +269,7 @@ enum dcbnl_commands {
* @DCB_ATTR_IEEE: IEEE 802.1Qaz supported attributes (NLA_NESTED)
* @DCB_ATTR_DCBX: DCBX engine configuration in the device (NLA_U8)
* @DCB_ATTR_FEATCFG: DCBX features flags (NLA_NESTED)
* @DCB_ATTR_CEE: CEE std supported attributes (NLA_NESTED)
*/
enum dcbnl_attrs {
DCB_ATTR_UNDEFINED,
@ -226,15 +293,32 @@ enum dcbnl_attrs {
DCB_ATTR_DCBX,
DCB_ATTR_FEATCFG,
/* CEE nested attributes */
DCB_ATTR_CEE,
__DCB_ATTR_ENUM_MAX,
DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
};
/**
* enum ieee_attrs - IEEE 802.1Qaz get/set attributes
*
* @DCB_ATTR_IEEE_UNSPEC: unspecified
* @DCB_ATTR_IEEE_ETS: negotiated ETS configuration
* @DCB_ATTR_IEEE_PFC: negotiated PFC configuration
* @DCB_ATTR_IEEE_APP_TABLE: negotiated APP configuration
* @DCB_ATTR_IEEE_PEER_ETS: peer ETS configuration - get only
* @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only
* @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only
*/
enum ieee_attrs {
DCB_ATTR_IEEE_UNSPEC,
DCB_ATTR_IEEE_ETS,
DCB_ATTR_IEEE_PFC,
DCB_ATTR_IEEE_APP_TABLE,
DCB_ATTR_IEEE_PEER_ETS,
DCB_ATTR_IEEE_PEER_PFC,
DCB_ATTR_IEEE_PEER_APP,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
@ -246,6 +330,31 @@ enum ieee_attrs_app {
};
#define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1)
/**
* enum cee_attrs - CEE DCBX get attributes
*
* @DCB_ATTR_CEE_UNSPEC: unspecified
* @DCB_ATTR_CEE_PEER_PG: peer PG configuration - get only
* @DCB_ATTR_CEE_PEER_PFC: peer PFC configuration - get only
* @DCB_ATTR_CEE_PEER_APP: peer APP tlv - get only
*/
enum cee_attrs {
DCB_ATTR_CEE_UNSPEC,
DCB_ATTR_CEE_PEER_PG,
DCB_ATTR_CEE_PEER_PFC,
DCB_ATTR_CEE_PEER_APP_TABLE,
__DCB_ATTR_CEE_MAX
};
#define DCB_ATTR_CEE_MAX (__DCB_ATTR_CEE_MAX - 1)
enum peer_app_attr {
DCB_ATTR_CEE_PEER_APP_UNSPEC,
DCB_ATTR_CEE_PEER_APP_INFO,
DCB_ATTR_CEE_PEER_APP,
__DCB_ATTR_CEE_PEER_APP_MAX
};
#define DCB_ATTR_CEE_PEER_APP_MAX (__DCB_ATTR_CEE_PEER_APP_MAX - 1)
/**
* enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
*

View file

@ -279,8 +279,6 @@ enum dccp_state {
DCCP_MAX_STATES
};
#define DCCP_STATE_MASK 0x1f
enum {
DCCPF_OPEN = TCPF_ESTABLISHED,
DCCPF_REQUESTING = TCPF_SYN_SENT,

View file

@ -34,7 +34,10 @@ struct debug_obj {
/**
* struct debug_obj_descr - object type specific debug description structure
*
* @name: name of the object type
* @debug_hint: function returning the address of an associated
* kernel symbol, to allow identifying the object
* @fixup_init: fixup function, which is called when the init check
* fails
* @fixup_activate: fixup function, which is called when the activate check
@ -46,7 +49,7 @@ struct debug_obj {
*/
struct debug_obj_descr {
const char *name;
void *(*debug_hint) (void *addr);
int (*fixup_init) (void *addr, enum debug_obj_state state);
int (*fixup_activate) (void *addr, enum debug_obj_state state);
int (*fixup_destroy) (void *addr, enum debug_obj_state state);
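A sketch of the new hook, modeled on what an object-debugging user such as the workqueue code can do: return an address that resolves to a kernel symbol (here, the work function):

	static void *work_debug_hint(void *addr)
	{
		return ((struct work_struct *)addr)->func;
	}

	static struct debug_obj_descr work_debug_descr = {
		.name		= "work_struct",
		.debug_hint	= work_debug_hint,
		/* .fixup_* callbacks as before */
	};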

View file

@ -16,7 +16,7 @@
/*
* Some architectures want to ensure there is no local data in their
* pre-boot environment, so that data can arbitarily relocated (via
* pre-boot environment, so that data can be arbitrarily relocated (via
* GOT references). This is achieved by defining STATIC_RW_DATA to
* be null.
*/

View file

@ -197,7 +197,6 @@ struct dm_target {
struct dm_target_callbacks {
struct list_head list;
int (*congested_fn) (struct dm_target_callbacks *, int);
void (*unplug_fn)(struct dm_target_callbacks *);
};
int dm_register_target(struct target_type *t);
@ -285,11 +284,6 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
*/
int dm_table_complete(struct dm_table *t);
/*
* Unplug all devices in a table.
*/
void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/

View file

@ -128,9 +128,7 @@ struct device_driver {
bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
#if defined(CONFIG_OF)
const struct of_device_id *of_match_table;
#endif
int (*probe) (struct device *dev);
int (*remove) (struct device *dev);
@ -422,6 +420,7 @@ struct device {
void *platform_data; /* Platform specific data, device
core doesn't touch it */
struct dev_pm_info power;
struct dev_power_domain *pwr_domain;
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
@ -441,9 +440,9 @@ struct device {
override */
/* arch specific additions */
struct dev_archdata archdata;
#ifdef CONFIG_OF
struct device_node *of_node;
#endif
struct device_node *of_node; /* associated device tree node */
const struct of_device_id *of_match; /* matching of_device_id from driver */
dev_t devt; /* dev_t, creates the sysfs "dev" */
@ -634,8 +633,12 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
/* drivers/base/power/shutdown.c */
extern void device_shutdown(void);
#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
/* drivers/base/sys.c */
extern void sysdev_shutdown(void);
#else
static inline void sysdev_shutdown(void) { }
#endif
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);

View file

@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 19
#define DM_VERSION_PATCHLEVEL 1
#define DM_VERSION_EXTRA "-ioctl (2011-01-07)"
#define DM_VERSION_MINOR 20
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2011-02-02)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@ -328,4 +328,10 @@ enum {
*/
#define DM_UUID_FLAG (1 << 14) /* In */
/*
* If set, all buffers are wiped after use. Use when sending
* or requesting sensitive data such as an encryption key.
*/
#define DM_SECURE_DATA_FLAG (1 << 15) /* In */
#endif /* _LINUX_DM_IOCTL_H */

View file

@ -434,7 +434,7 @@ struct dma_tx_state {
* zero or error code
* @device_tx_status: poll for transaction completion, the optional
* txstate parameter can be supplied with a pointer to get a
* struct with auxilary transfer status information, otherwise the call
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
*/

View file

@ -23,6 +23,53 @@ enum dmi_device_type {
DMI_DEV_TYPE_DEV_ONBOARD = -3,
};
enum dmi_entry_type {
DMI_ENTRY_BIOS = 0,
DMI_ENTRY_SYSTEM,
DMI_ENTRY_BASEBOARD,
DMI_ENTRY_CHASSIS,
DMI_ENTRY_PROCESSOR,
DMI_ENTRY_MEM_CONTROLLER,
DMI_ENTRY_MEM_MODULE,
DMI_ENTRY_CACHE,
DMI_ENTRY_PORT_CONNECTOR,
DMI_ENTRY_SYSTEM_SLOT,
DMI_ENTRY_ONBOARD_DEVICE,
DMI_ENTRY_OEMSTRINGS,
DMI_ENTRY_SYSCONF,
DMI_ENTRY_BIOS_LANG,
DMI_ENTRY_GROUP_ASSOC,
DMI_ENTRY_SYSTEM_EVENT_LOG,
DMI_ENTRY_PHYS_MEM_ARRAY,
DMI_ENTRY_MEM_DEVICE,
DMI_ENTRY_32_MEM_ERROR,
DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR,
DMI_ENTRY_MEM_DEV_MAPPED_ADDR,
DMI_ENTRY_BUILTIN_POINTING_DEV,
DMI_ENTRY_PORTABLE_BATTERY,
DMI_ENTRY_SYSTEM_RESET,
DMI_ENTRY_HW_SECURITY,
DMI_ENTRY_SYSTEM_POWER_CONTROLS,
DMI_ENTRY_VOLTAGE_PROBE,
DMI_ENTRY_COOLING_DEV,
DMI_ENTRY_TEMP_PROBE,
DMI_ENTRY_ELECTRICAL_CURRENT_PROBE,
DMI_ENTRY_OOB_REMOTE_ACCESS,
DMI_ENTRY_BIS_ENTRY,
DMI_ENTRY_SYSTEM_BOOT,
DMI_ENTRY_MGMT_DEV,
DMI_ENTRY_MGMT_DEV_COMPONENT,
DMI_ENTRY_MGMT_DEV_THRES,
DMI_ENTRY_MEM_CHANNEL,
DMI_ENTRY_IPMI_DEV,
DMI_ENTRY_SYS_POWER_SUPPLY,
DMI_ENTRY_ADDITIONAL,
DMI_ENTRY_ONBOARD_DEV_EXT,
DMI_ENTRY_MGMT_CONTROLLER_HOST,
DMI_ENTRY_INACTIVE = 126,
DMI_ENTRY_END_OF_TABLE = 127,
};
struct dmi_header {
u8 type;
u8 length;

View file

@ -36,7 +36,7 @@
#include <sys/wait.h>
#include <limits.h>
/* Altough the Linux source code makes a difference between
/* Although the Linux source code makes a difference between
generic endianness and the bitfields' endianness, there is no
architecture as of Linux-2.6.24-rc4 where the bitfields' endianness
does not match the generic endianness. */
@ -53,10 +53,10 @@
extern const char *drbd_buildtag(void);
#define REL_VERSION "8.3.9"
#define REL_VERSION "8.3.10"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 95
#define PRO_VERSION_MAX 96
enum drbd_io_error_p {
@ -96,8 +96,14 @@ enum drbd_on_no_data {
OND_SUSPEND_IO
};
enum drbd_on_congestion {
OC_BLOCK,
OC_PULL_AHEAD,
OC_DISCONNECT,
};
/* KEEP the order, do not delete or insert. Only append. */
enum drbd_ret_codes {
enum drbd_ret_code {
ERR_CODE_BASE = 100,
NO_ERROR = 101,
ERR_LOCAL_ADDR = 102,
@ -146,6 +152,9 @@ enum drbd_ret_codes {
ERR_PERM = 152,
ERR_NEED_APV_93 = 153,
ERR_STONITH_AND_PROT_A = 154,
ERR_CONG_NOT_PROTO_A = 155,
ERR_PIC_AFTER_DEP = 156,
ERR_PIC_PEER_DEP = 157,
/* insert new ones above this line */
AFTER_LAST_ERR_CODE
@ -175,7 +184,7 @@ enum drbd_conns {
/* These temporal states are all used on the way
* from >= C_CONNECTED to Unconnected.
* The 'disconnect reason' states
* I do not allow to change beween them. */
* I do not allow to change between them. */
C_TIMEOUT,
C_BROKEN_PIPE,
C_NETWORK_FAILURE,
@ -199,6 +208,10 @@ enum drbd_conns {
C_VERIFY_T,
C_PAUSED_SYNC_S,
C_PAUSED_SYNC_T,
C_AHEAD,
C_BEHIND,
C_MASK = 31
};
@ -259,7 +272,7 @@ union drbd_state {
unsigned int i;
};
enum drbd_state_ret_codes {
enum drbd_state_rv {
SS_CW_NO_NEED = 4,
SS_CW_SUCCESS = 3,
SS_NOTHING_TO_DO = 2,
@ -290,7 +303,7 @@ enum drbd_state_ret_codes {
extern const char *drbd_conn_str(enum drbd_conns);
extern const char *drbd_role_str(enum drbd_role);
extern const char *drbd_disk_str(enum drbd_disk_state);
extern const char *drbd_set_st_err_str(enum drbd_state_ret_codes);
extern const char *drbd_set_st_err_str(enum drbd_state_rv);
#define SHARED_SECRET_MAX 64

View file

@ -16,7 +16,8 @@
#define DEBUG_RANGE_CHECK 0
#define DRBD_MINOR_COUNT_MIN 1
#define DRBD_MINOR_COUNT_MAX 255
#define DRBD_MINOR_COUNT_MAX 256
#define DRBD_MINOR_COUNT_DEF 32
#define DRBD_DIALOG_REFRESH_MIN 0
#define DRBD_DIALOG_REFRESH_MAX 600
@ -42,7 +43,7 @@
/* net { */
/* timeout, unit centi seconds
* more than one minute timeout is not usefull */
* more than one minute timeout is not useful */
#define DRBD_TIMEOUT_MIN 1
#define DRBD_TIMEOUT_MAX 600
#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
@ -67,7 +68,7 @@
#define DRBD_MAX_EPOCH_SIZE_MAX 20000
#define DRBD_MAX_EPOCH_SIZE_DEF 2048
/* I don't think that a tcp send buffer of more than 10M is usefull */
/* I don't think that a tcp send buffer of more than 10M is useful */
#define DRBD_SNDBUF_SIZE_MIN 0
#define DRBD_SNDBUF_SIZE_MAX (10<<20)
#define DRBD_SNDBUF_SIZE_DEF 0
@ -100,7 +101,7 @@
#define DRBD_RATE_MAX (4 << 20)
#define DRBD_RATE_DEF 250 /* kb/second */
/* less than 7 would hit performance unneccessarily.
/* less than 7 would hit performance unnecessarily.
* 3833 is the largest prime that still does fit
* into 64 sectors of activity log */
#define DRBD_AL_EXTENTS_MIN 7
@ -129,6 +130,7 @@
#define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT
#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT
#define DRBD_ON_NO_DATA_DEF OND_IO_ERROR
#define DRBD_ON_CONGESTION_DEF OC_BLOCK
#define DRBD_MAX_BIO_BVECS_MIN 0
#define DRBD_MAX_BIO_BVECS_MAX 128
@ -154,5 +156,13 @@
#define DRBD_C_MIN_RATE_MAX (4 << 20)
#define DRBD_C_MIN_RATE_DEF 4096
#define DRBD_CONG_FILL_MIN 0
#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */
#define DRBD_CONG_FILL_DEF 0
#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN
#define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX
#define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF
#undef RANGE
#endif

View file

@ -56,6 +56,9 @@ NL_PACKET(net_conf, 5,
NL_INTEGER( 39, T_MAY_IGNORE, rr_conflict)
NL_INTEGER( 40, T_MAY_IGNORE, ping_timeo)
NL_INTEGER( 67, T_MAY_IGNORE, rcvbuf_size)
NL_INTEGER( 81, T_MAY_IGNORE, on_congestion)
NL_INTEGER( 82, T_MAY_IGNORE, cong_fill)
NL_INTEGER( 83, T_MAY_IGNORE, cong_extents)
/* 59 addr_family was available in GIT, never released */
NL_BIT( 60, T_MANDATORY, mind_af)
NL_BIT( 27, T_MAY_IGNORE, want_lose)
@ -66,7 +69,9 @@ NL_PACKET(net_conf, 5,
NL_BIT( 70, T_MANDATORY, dry_run)
)
NL_PACKET(disconnect, 6, )
NL_PACKET(disconnect, 6,
NL_BIT( 84, T_MAY_IGNORE, force)
)
NL_PACKET(resize, 7,
NL_INT64( 29, T_MAY_IGNORE, resize_size)
@ -143,9 +148,13 @@ NL_PACKET(new_c_uuid, 26,
NL_BIT( 63, T_MANDATORY, clear_bm)
)
#ifdef NL_RESPONSE
NL_RESPONSE(return_code_only, 27)
#endif
#undef NL_PACKET
#undef NL_INTEGER
#undef NL_INT64
#undef NL_BIT
#undef NL_STRING
#undef NL_RESPONSE

View file

@ -7,6 +7,7 @@
/* declare packet_type enums */
enum packet_types {
#define NL_PACKET(name, number, fields) P_ ## name = number,
#define NL_RESPONSE(name, number) P_ ## name = number,
#define NL_INTEGER(pn, pr, member)
#define NL_INT64(pn, pr, member)
#define NL_BIT(pn, pr, member)

View file

@ -16,9 +16,18 @@
/**
* struct dw_dma_platform_data - Controller configuration parameters
* @nr_channels: Number of channels supported by hardware (max 8)
* @is_private: The device channels should be marked as private and not for
* use by the general purpose DMA channel allocator.
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
unsigned char chan_priority;
};
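An illustrative board-file initializer for the new fields; all values are assumptions for a hypothetical platform:

	static struct dw_dma_platform_data board_dw_pdata = {
		.nr_channels		= 8,
		.is_private		= true,
		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
		.chan_priority		= CHAN_PRIORITY_DESCENDING,
	};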
/**
@ -33,6 +42,30 @@ enum dw_dma_slave_width {
DW_DMA_SLAVE_WIDTH_32BIT,
};
/* bursts size */
enum dw_dma_msize {
DW_DMA_MSIZE_1,
DW_DMA_MSIZE_4,
DW_DMA_MSIZE_8,
DW_DMA_MSIZE_16,
DW_DMA_MSIZE_32,
DW_DMA_MSIZE_64,
DW_DMA_MSIZE_128,
DW_DMA_MSIZE_256,
};
/* flow controller */
enum dw_dma_fc {
DW_DMA_FC_D_M2M,
DW_DMA_FC_D_M2P,
DW_DMA_FC_D_P2M,
DW_DMA_FC_D_P2P,
DW_DMA_FC_P_P2M,
DW_DMA_FC_SP_P2P,
DW_DMA_FC_P_M2P,
DW_DMA_FC_DP_P2P,
};
/**
* struct dw_dma_slave - Controller-specific information about a slave
*
@ -44,6 +77,11 @@ enum dw_dma_slave_width {
* @reg_width: peripheral register width
* @cfg_hi: Platform-specific initializer for the CFG_HI register
* @cfg_lo: Platform-specific initializer for the CFG_LO register
* @src_master: src master for transfers on allocated channel.
* @dst_master: dest master for transfers on allocated channel.
* @src_msize: src burst size.
* @dst_msize: dest burst size.
* @fc: flow controller for DMA transfer
*/
struct dw_dma_slave {
struct device *dma_dev;
@ -52,6 +90,11 @@ struct dw_dma_slave {
enum dw_dma_slave_width reg_width;
u32 cfg_hi;
u32 cfg_lo;
u8 src_master;
u8 dst_master;
u8 src_msize;
u8 dst_msize;
u8 fc;
};
/* Platform-configurable bits in CFG_HI */
@ -62,7 +105,6 @@ struct dw_dma_slave {
#define DWC_CFGH_DST_PER(x) ((x) << 11)
/* Platform-configurable bits in CFG_LO */
#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */
#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
#define DWC_CFGL_LOCK_CH_XACT (2 << 12)

View file

@ -31,6 +31,10 @@ struct _ddebug {
* writes commands to <debugfs>/dynamic_debug/control
*/
#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
#define _DPRINTK_FLAGS_INCL_MODNAME (1<<1)
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
#define _DPRINTK_FLAGS_DEFAULT 0
unsigned int flags:8;
char enabled;
@ -42,6 +46,8 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
#if defined(CONFIG_DYNAMIC_DEBUG)
extern int ddebug_remove_module(const char *mod_name);
extern int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
#define dynamic_pr_debug(fmt, ...) do { \
static struct _ddebug descriptor \
@ -50,7 +56,7 @@ extern int ddebug_remove_module(const char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
_DPRINTK_FLAGS_DEFAULT }; \
if (unlikely(descriptor.enabled)) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)

View file

@ -397,4 +397,41 @@ static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
*addr &= PAGE_MASK;
}
#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
/*
* EFI Variable support.
*
* Different firmware drivers can expose their EFI-like variables using
* the following.
*/
struct efivar_operations {
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
};
struct efivars {
/*
* ->lock protects two things:
* 1) ->list - adds, removals, reads, writes
* 2) ops.[gs]et_variable() calls.
* It must not be held when creating sysfs entries or calling kmalloc.
* ops.get_next_variable() is only called from register_efivars(),
* which is protected by the BKL, so that path is safe.
*/
spinlock_t lock;
struct list_head list;
struct kset *kset;
struct bin_attribute *new_var, *del_var;
const struct efivar_operations *ops;
};
int register_efivars(struct efivars *efivars,
const struct efivar_operations *ops,
struct kobject *parent_kobj);
void unregister_efivars(struct efivars *efivars);
#endif /* CONFIG_EFI_VARS */
#endif /* _LINUX_EFI_H */
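A hedged registration sketch; the my_* callbacks and parent kobject are assumed to be provided elsewhere by the firmware driver:

	static struct efivars my_efivars;

	static const struct efivar_operations my_efivar_ops = {
		.get_variable		= my_get_variable,
		.get_next_variable	= my_get_next_variable,
		.set_variable		= my_set_variable,
	};

	static int __init my_fw_init(void)
	{
		return register_efivars(&my_efivars, &my_efivar_ops,
					my_parent_kobject);
	}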

View file

@ -20,7 +20,6 @@ typedef void (elevator_bio_merged_fn) (struct request_queue *,
typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
typedef int (elevator_queue_empty_fn) (struct request_queue *);
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
typedef int (elevator_may_queue_fn) (struct request_queue *, int);
@ -46,7 +45,6 @@ struct elevator_ops
elevator_activate_req_fn *elevator_activate_req_fn;
elevator_deactivate_req_fn *elevator_deactivate_req_fn;
elevator_queue_empty_fn *elevator_queue_empty_fn;
elevator_completed_req_fn *elevator_completed_req_fn;
elevator_request_list_fn *elevator_former_req_fn;
@ -101,17 +99,16 @@ struct elevator_queue
*/
extern void elv_dispatch_sort(struct request_queue *, struct request *);
extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
extern void elv_add_request(struct request_queue *, struct request *, int, int);
extern void __elv_add_request(struct request_queue *, struct request *, int, int);
extern void elv_insert(struct request_queue *, struct request *, int);
extern void elv_add_request(struct request_queue *, struct request *, int);
extern void __elv_add_request(struct request_queue *, struct request *, int);
extern int elv_merge(struct request_queue *, struct request **, struct bio *);
extern int elv_try_merge(struct request *, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
extern void elv_bio_merged(struct request_queue *q, struct request *,
struct bio *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern int elv_queue_empty(struct request_queue *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
@ -167,6 +164,8 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
#define ELEVATOR_INSERT_REQUEUE 4
#define ELEVATOR_INSERT_FLUSH 5
#define ELEVATOR_INSERT_SORT_MERGE 6
/*
* return values from elevator_may_queue_fn

View file

@ -52,6 +52,14 @@ static inline void * __must_check ERR_CAST(const void *ptr)
return (void *) ptr;
}
static inline int __must_check PTR_RET(const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
else
return 0;
}
#endif
#endif /* _LINUX_ERR_H */
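A short sketch of the intended use; my_create() is a hypothetical helper that returns either a valid pointer or an ERR_PTR() value:

	static int my_init(void)
	{
		struct my_obj *obj = my_create();

		return PTR_RET(obj);	/* PTR_ERR(obj) on error, else 0 */
	}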

View file

@ -13,6 +13,9 @@
#ifndef _LINUX_ETHTOOL_H
#define _LINUX_ETHTOOL_H
#ifdef __KERNEL__
#include <linux/compat.h>
#endif
#include <linux/types.h>
#include <linux/if_ether.h>
@ -251,6 +254,7 @@ enum ethtool_stringset {
ETH_SS_STATS,
ETH_SS_PRIV_FLAGS,
ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
};
/* for passing string sets for data tagging */
@ -449,6 +453,37 @@ struct ethtool_rxnfc {
__u32 rule_locs[0];
};
#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union {
struct ethtool_tcpip4_spec tcp_ip4_spec;
struct ethtool_tcpip4_spec udp_ip4_spec;
struct ethtool_tcpip4_spec sctp_ip4_spec;
struct ethtool_ah_espip4_spec ah_ip4_spec;
struct ethtool_ah_espip4_spec esp_ip4_spec;
struct ethtool_usrip4_spec usr_ip4_spec;
struct ethhdr ether_spec;
u8 hdata[72];
} h_u, m_u;
compat_u64 ring_cookie;
u32 location;
};
struct compat_ethtool_rxnfc {
u32 cmd;
u32 flow_type;
compat_u64 data;
struct compat_ethtool_rx_flow_spec fs;
u32 rule_cnt;
u32 rule_locs[0];
};
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
/**
* struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
* @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
@ -523,10 +558,99 @@ struct ethtool_flash {
char data[ETHTOOL_FLASH_MAX_FILENAME];
};
/* for returning and changing feature sets */
/**
* struct ethtool_get_features_block - block with state of 32 features
* @available: mask of changeable features
* @requested: mask of features requested to be enabled if possible
* @active: mask of currently enabled features
* @never_changed: mask of features not changeable for any device
*/
struct ethtool_get_features_block {
__u32 available;
__u32 requested;
__u32 active;
__u32 never_changed;
};
/**
* struct ethtool_gfeatures - command to get state of device's features
* @cmd: command number = %ETHTOOL_GFEATURES
* @size: in: number of elements in the features[] array;
* out: number of elements in features[] needed to hold all features
* @features: state of features
*/
struct ethtool_gfeatures {
__u32 cmd;
__u32 size;
struct ethtool_get_features_block features[0];
};
/**
* struct ethtool_set_features_block - block with request for 32 features
* @valid: mask of features to be changed
* @requested: values of features to be changed
*/
struct ethtool_set_features_block {
__u32 valid;
__u32 requested;
};
/**
* struct ethtool_sfeatures - command to request change in device's features
* @cmd: command number = %ETHTOOL_SFEATURES
* @size: array size of the features[] array
* @features: feature change masks
*/
struct ethtool_sfeatures {
__u32 cmd;
__u32 size;
struct ethtool_set_features_block features[0];
};
/*
* %ETHTOOL_SFEATURES changes features present in features[].valid to the
* values of corresponding bits in features[].requested. Bits in .requested
* not set in .valid or not changeable are ignored.
*
* Returns %EINVAL when .valid contains undefined or never-changeable bits
* or size is not equal to the required number of features words (32-bit blocks).
* Returns >= 0 if the request was completed; bits set in the value mean:
* %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not
* changeable (not present in %ETHTOOL_GFEATURES' features[].available);
* those bits were ignored.
* %ETHTOOL_F_WISH - some or all changes requested were recorded but the
* resulting state of bits masked by .valid is not equal to .requested.
* Probably there are other device-specific constraints on some features
* in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered
* here as though ignored bits were cleared.
* %ETHTOOL_F_COMPAT - some or all changes requested were made by calling
* compatibility functions. Requested offload state cannot be properly
* managed by kernel.
*
* Meaning of bits in the masks is obtained via %ETHTOOL_GSSET_INFO (number of
* bits in the arrays - always a multiple of 32) and %ETHTOOL_GSTRINGS commands
* for the ETH_SS_FEATURES string set. The first entry in the table corresponds
* to the least significant bit in the features[0] fields. Empty strings mark
* undefined features.
*/
enum ethtool_sfeatures_retval_bits {
ETHTOOL_F_UNSUPPORTED__BIT,
ETHTOOL_F_WISH__BIT,
ETHTOOL_F_COMPAT__BIT,
};
#define ETHTOOL_F_UNSUPPORTED (1 << ETHTOOL_F_UNSUPPORTED__BIT)
#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT)
#define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT)
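A hedged userspace sketch of issuing %ETHTOOL_SFEATURES for a single 32-bit block, assuming the usual <linux/ethtool.h>, <linux/sockios.h>, <net/if.h> and <string.h> includes, an open socket fd, and a feature_bit index already resolved via the ETH_SS_FEATURES string set:

	char buf[sizeof(struct ethtool_sfeatures) +
		 sizeof(struct ethtool_set_features_block)] = { 0 };
	struct ethtool_sfeatures *req = (struct ethtool_sfeatures *)buf;
	struct ifreq ifr;
	int ret;

	req->cmd = ETHTOOL_SFEATURES;
	req->size = 1;
	req->features[0].valid = 1u << feature_bit;
	req->features[0].requested = 1u << feature_bit;	/* enable it */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (void *)req;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret >= 0 && (ret & ETHTOOL_F_WISH))
		;	/* recorded, but constrained by the device */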
#ifdef __KERNEL__
#include <linux/rculist.h>
/* needed by dev_disable_lro() */
extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
struct ethtool_rx_ntuple_flow_spec_container {
struct ethtool_rx_ntuple_flow_spec fs;
struct list_head list;
@ -543,7 +667,6 @@ struct net_device;
/* Some generic methods drivers may use in their ethtool_ops */
u32 ethtool_op_get_link(struct net_device *dev);
u32 ethtool_op_get_rx_csum(struct net_device *dev);
u32 ethtool_op_get_tx_csum(struct net_device *dev);
int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
@ -557,6 +680,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data);
u32 ethtool_op_get_flags(struct net_device *dev);
int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
void ethtool_ntuple_flush(struct net_device *dev);
bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
/**
* &ethtool_ops - Alter and report network device settings
@ -744,6 +868,9 @@ struct ethtool_ops {
#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */
#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */
#define ETHTOOL_GFEATURES 0x0000003a /* Get device offload settings */
#define ETHTOOL_SFEATURES 0x0000003b /* Change device offload settings */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET

View file

@ -1,5 +1,5 @@
/*
* include/linux/eventpoll.h ( Efficent event polling implementation )
* include/linux/eventpoll.h ( Efficient event polling implementation )
* Copyright (C) 2001,...,2006 Davide Libenzi
*
* This program is free software; you can redistribute it and/or modify

View file

@ -8,6 +8,9 @@ struct inode;
struct super_block;
struct vfsmount;
/* limit the handle size to NFSv4 handle size now */
#define MAX_HANDLE_SZ 128
/*
* The fileid_type identifies how the file within the filesystem is encoded.
* In theory this is freely set and parsed by the filesystem, but we try to
@ -117,12 +120,14 @@ struct fid {
* encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
* file refered to by the &struct dentry @de. If the @connectable flag is
* file referred to by the &struct dentry @de. If the @connectable flag is
* set, the encode_fh() should store sufficient information so that a good
* attempt can be made to find not only the file but also it's place in the
* filesystem. This typically means storing a reference to de->d_parent in
* the filehandle fragment. encode_fh() should return the number of bytes
* stored or a negative error code such as %-ENOSPC
* the filehandle fragment. encode_fh() should return the fileid_type on
* success and 255 on error (if the space needed to encode the fh is
* greater than @max_len*4 bytes). On error @max_len contains the
* minimum size (in 4 byte units) needed to encode the file handle.
*
* fh_to_dentry:
* @fh_to_dentry is given a &struct super_block (@sb) and a file handle

View file

@ -418,13 +418,13 @@ struct ext3_inode {
#define EXT2_MOUNT_DATA_FLAGS EXT3_MOUNT_DATA_FLAGS
#endif
#define ext3_set_bit ext2_set_bit
#define ext3_set_bit __test_and_set_bit_le
#define ext3_set_bit_atomic ext2_set_bit_atomic
#define ext3_clear_bit ext2_clear_bit
#define ext3_clear_bit __test_and_clear_bit_le
#define ext3_clear_bit_atomic ext2_clear_bit_atomic
#define ext3_test_bit ext2_test_bit
#define ext3_find_first_zero_bit ext2_find_first_zero_bit
#define ext3_find_next_zero_bit ext2_find_next_zero_bit
#define ext3_test_bit test_bit_le
#define ext3_find_first_zero_bit find_first_zero_bit_le
#define ext3_find_next_zero_bit find_next_zero_bit_le
/*
* Maximal mount counts between two filesystem checks
@ -884,7 +884,8 @@ extern int ext3fs_dirhash(const char *name, int len, struct
dx_hash_info *hinfo);
/* ialloc.c */
extern struct inode * ext3_new_inode (handle_t *, struct inode *, int);
extern struct inode * ext3_new_inode (handle_t *, struct inode *,
const struct qstr *, int);
extern void ext3_free_inode (handle_t *, struct inode *);
extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
extern unsigned long ext3_count_free_inodes (struct super_block *);

View file

@ -152,6 +152,8 @@
#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */
#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */
#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */
struct fb_fix_screeninfo {
char id[16]; /* identification string eg "TT Builtin" */
unsigned long smem_start; /* Start of frame buffer mem */
@ -532,14 +534,14 @@ struct fb_cursor_user {
#define FB_EVENT_GET_CONSOLE_MAP 0x07
/* CONSOLE-SPECIFIC: set console to framebuffer mapping */
#define FB_EVENT_SET_CONSOLE_MAP 0x08
/* A hardware display blank change occured */
/* A hardware display blank change occurred */
#define FB_EVENT_BLANK 0x09
/* Private modelist is to be replaced */
#define FB_EVENT_NEW_MODELIST 0x0A
/* The resolution of the passed in fb_info about to change and
all vc's should be changed */
#define FB_EVENT_MODE_CHANGE_ALL 0x0B
/* A software display blank change occured */
/* A software display blank change occurred */
#define FB_EVENT_CONBLANK 0x0C
/* Get drawing requirements */
#define FB_EVENT_GET_REQ 0x0D
@ -803,7 +805,7 @@ struct fb_tile_ops {
/* A driver may set this flag to indicate that it does want a set_par to be
* called every time when fbcon_switch is executed. The advantage is that with
* this flag set you can really be sure that set_par is always called before
* any of the functions dependant on the correct hardware state or altering
* any of the functions dependent on the correct hardware state or altering
* that state, even if you are using some broken X releases. The disadvantage
* is that it introduces unwanted delays to every console switch if set_par
* is slow. It is a good idea to try this flag in the drivers initialization
@ -875,7 +877,7 @@ struct fb_info {
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
/* we need the PCI or similiar aperture base/size not
/* we need the PCI or similar aperture base/size not
smem_start/size as smem_start may just be an object
allocated inside the aperture so may not actually overlap */
struct apertures_struct {

View file

@ -46,6 +46,7 @@
unlinking file. */
#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
#ifdef __KERNEL__

View file

@ -29,6 +29,8 @@ static inline void fput_light(struct file *file, int fput_needed)
extern struct file *fget(unsigned int fd);
extern struct file *fget_light(unsigned int fd, int *fput_needed);
extern struct file *fget_raw(unsigned int fd);
extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
extern void set_close_on_exec(unsigned int fd, int flag);
extern void put_filp(struct file *);
extern int alloc_fd(unsigned start, unsigned flags);

View file

@ -900,7 +900,7 @@ struct fw_cdev_get_cycle_timer2 {
/**
* struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
* @closure: Passed back to userspace in correponding iso resource events
* @closure: Passed back to userspace in corresponding iso resource events
* @channels: Isochronous channels of which one is to be (de)allocated
* @bandwidth: Isochronous bandwidth units to be (de)allocated
* @handle: Handle to the allocation, written by the kernel (only valid in

View file

@ -42,6 +42,10 @@
#define CSR_BROADCAST_CHANNEL 0x234
#define CSR_CONFIG_ROM 0x400
#define CSR_CONFIG_ROM_END 0x800
#define CSR_OMPR 0x900
#define CSR_OPCR(i) (0x904 + (i) * 4)
#define CSR_IMPR 0x980
#define CSR_IPCR(i) (0x984 + (i) * 4)
#define CSR_FCP_COMMAND 0xB00
#define CSR_FCP_RESPONSE 0xD00
#define CSR_FCP_END 0xF00
@ -89,7 +93,7 @@ struct fw_card {
int current_tlabel;
u64 tlabel_mask;
struct list_head transaction_list;
unsigned long reset_jiffies;
u64 reset_jiffies;
u32 split_timeout_hi;
u32 split_timeout_lo;
@ -441,5 +445,8 @@ int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
void fw_iso_context_destroy(struct fw_iso_context *ctx);
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth,
bool allocate, __be32 buffer[2]);
#endif /* _LINUX_FIREWIRE_H */

View file

@ -39,7 +39,7 @@ struct builtin_fw {
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_nowait(
struct module *module, int uevent,
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context));
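A usage sketch with the now-boolean uevent argument (FW_ACTION_HOTPLUG is the usual value passed there; md and "my_fw.bin" are assumptions):

	static void my_fw_cont(const struct firmware *fw, void *context)
	{
		struct my_device *md = context;

		if (fw) {
			/* program md from fw->data / fw->size ... */
			release_firmware(fw);
		}
	}

	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      "my_fw.bin", &md->dev, GFP_KERNEL,
				      md, my_fw_cont);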
@ -52,7 +52,7 @@ static inline int request_firmware(const struct firmware **fw,
return -EINVAL;
}
static inline int request_firmware_nowait(
struct module *module, int uevent,
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{

View file

@ -61,7 +61,7 @@ struct flex_array {
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int end, gfp_t flags);
unsigned int nr_elements, gfp_t flags);
void flex_array_free(struct flex_array *fa);
void flex_array_free_parts(struct flex_array *fa);
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
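A hedged sketch of the updated prealloc signature, whose third argument is now an element count rather than an end index (struct my_item and nr_items are assumptions):

	struct flex_array *fa;
	int err;

	fa = flex_array_alloc(sizeof(struct my_item), nr_items, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* reserve backing parts for elements [0, nr_items) up front */
	err = flex_array_prealloc(fa, 0, nr_items, GFP_KERNEL);
	if (err) {
		flex_array_free(fa);
		return err;
	}

	err = flex_array_put(fa, 0, &item, GFP_KERNEL);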

View file

@ -102,6 +102,9 @@ struct inodes_stat_t {
/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)0x4000)
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
@ -135,16 +138,10 @@ struct inodes_stat_t {
* block layer could (in theory) choose to ignore this
* request if it runs into resource problems.
* WRITE A normal async write. Device will be plugged.
* WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down
* WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
* the hint that someone will be waiting on this IO
* shortly. The device must still be unplugged explicitly,
* WRITE_SYNC_PLUG does not do this as we could be
* submitting more writes before we actually wait on any
* of them.
* WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
* immediately after submission. The write equivalent
* of READ_SYNC.
* WRITE_ODIRECT_PLUG Special case write for O_DIRECT only.
* shortly. The write equivalent of READ_SYNC.
* WRITE_ODIRECT Special case write for O_DIRECT only.
* WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
* WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
* non-volatile media on completion.
@ -160,18 +157,14 @@ struct inodes_stat_t {
#define WRITE RW_MASK
#define READA RWA_MASK
#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG)
#define READ_SYNC (READ | REQ_SYNC)
#define READ_META (READ | REQ_META)
#define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
#define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
#define WRITE_ODIRECT (WRITE | REQ_SYNC)
#define WRITE_META (WRITE | REQ_META)
#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
REQ_FLUSH)
#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
REQ_FUA)
#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
REQ_FLUSH | REQ_FUA)
#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
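As a usage sketch, a journaling filesystem forcing a commit block to stable media can combine both semantics in one submission (bio construction omitted; assumes the submit_bio(rw, bio) interface of this era):

	submit_bio(WRITE_FLUSH_FUA, bio);	/* preceding cache flush + FUA write */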
#define SEL_IN 1
#define SEL_OUT 2
@ -364,6 +357,8 @@ struct inodes_stat_t {
#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define FS_EXTENT_FL 0x00080000 /* Extents */
#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
#define FS_COW_FL 0x02000000 /* Cow file */
#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
@ -470,7 +465,7 @@ struct iattr {
struct timespec ia_ctime;
/*
* Not an attribute, but an auxilary info for filesystems wanting to
* Not an attribute, but an auxiliary info for filesystems wanting to
* implement an ftruncate() like method. NOTE: filesystem should
* check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
*/
@ -583,7 +578,6 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
void (*sync_page)(struct page *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
@ -619,6 +613,8 @@ struct address_space_operations {
int (*error_remove_page)(struct address_space *, struct page *);
};
extern const struct address_space_operations empty_aops;
/*
* pagecache_write_begin/pagecache_write_end must be used by general code
* to write into the pagecache.
@ -649,18 +645,19 @@ struct address_space {
spinlock_t private_lock; /* for use by the address_space */
struct list_head private_list; /* ditto */
struct address_space *assoc_mapping; /* ditto */
struct mutex unmap_mutex; /* to protect unmapping */
} __attribute__((aligned(sizeof(long))));
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least signficant bit
* must be enforced here for CRIS, to let the least significant bit
* of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
*/
struct block_device {
dev_t bd_dev; /* not a kdev_t - it's a search key */
int bd_openers;
struct inode * bd_inode; /* will die */
struct super_block * bd_super;
int bd_openers;
struct mutex bd_mutex; /* open/close mutex */
struct list_head bd_inodes;
void * bd_claiming;
@ -797,8 +794,7 @@ struct inode {
#endif
#ifdef CONFIG_IMA
/* protected by i_lock */
unsigned int i_readcount; /* struct files open RO */
atomic_t i_readcount; /* struct files open RO */
#endif
atomic_t i_writecount;
#ifdef CONFIG_SECURITY
@ -977,6 +973,13 @@ struct file {
#endif
};
struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
unsigned char f_handle[0];
};
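A userspace sketch of the syscall pair this structure serves (wrapper availability is assumed; mount_fd must be a descriptor on the filesystem identified by mount_id):

	struct file_handle *fh;
	int mount_id, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, "/some/file", fh, &mount_id, 0) == 0)
		fd = open_by_handle_at(mount_fd, fh, O_RDONLY);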
#define get_file(x) atomic_long_inc(&(x)->f_count)
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
@ -1400,6 +1403,7 @@ struct super_block {
wait_queue_head_t s_wait_unfrozen;
char s_id[32]; /* Informational name */
u8 s_uuid[16]; /* UUID */
void *s_fs_info; /* Filesystem private info */
fmode_t s_mode;
@ -1446,8 +1450,13 @@ enum {
#define put_fs_excl() atomic_dec(&current->fs_excl)
#define has_fs_excl() atomic_read(&current->fs_excl)
#define is_owner_or_cap(inode) \
((current_fsuid() == (inode)->i_uid) || capable(CAP_FOWNER))
/*
* until VFS tracks user namespaces for inodes, just make all files
* belong to init_user_ns
*/
extern struct user_namespace init_user_ns;
#define inode_userns(inode) (&init_user_ns)
extern bool inode_owner_or_capable(const struct inode *inode);
/* not quite ready to be deprecated, but... */
extern void lock_super(struct super_block *);
@ -1620,6 +1629,8 @@ struct super_operations {
void (*umount_begin) (struct super_block *);
int (*show_options)(struct seq_file *, struct vfsmount *);
int (*show_devname)(struct seq_file *, struct vfsmount *);
int (*show_path)(struct seq_file *, struct vfsmount *);
int (*show_stats)(struct seq_file *, struct vfsmount *);
#ifdef CONFIG_QUOTA
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
@ -1629,7 +1640,7 @@ struct super_operations {
};
/*
* Inode state bits. Protected by inode_lock.
* Inode state bits. Protected by inode->i_lock
*
* Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
* I_DIRTY_DATASYNC and I_DIRTY_PAGES.
@ -1783,8 +1794,6 @@ int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
const char *name;
int fs_flags;
int (*get_sb) (struct file_system_type *, int,
const char *, void *, struct vfsmount *);
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
@ -1807,24 +1816,12 @@ extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern int get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int),
struct vfsmount *mnt);
extern struct dentry *mount_single(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern int get_sb_single(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int),
struct vfsmount *mnt);
extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern int get_sb_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int),
struct vfsmount *mnt);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
@ -1840,7 +1837,6 @@ extern struct dentry *mount_pseudo(struct file_system_type *, char *,
const struct super_operations *ops,
const struct dentry_operations *dops,
unsigned long);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
static inline void sb_mark_dirty(struct super_block *sb)
{
@ -1873,6 +1869,8 @@ extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
struct vfsmount *);
extern int vfs_statfs(struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
@ -1989,6 +1987,8 @@ extern int do_fallocate(struct file *file, int mode, loff_t offset,
extern long do_sys_open(int dfd, const char __user *filename, int flags,
int mode);
extern struct file *filp_open(const char *, int, int);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
@ -2139,7 +2139,7 @@ extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *);
extern int __invalidate_device(struct block_device *, bool);
extern int invalidate_partition(struct gendisk *, int);
#endif
unsigned long invalidate_mapping_pages(struct address_space *mapping,
@ -2199,15 +2199,31 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
}
#ifdef CONFIG_IMA
static inline void i_readcount_dec(struct inode *inode)
{
BUG_ON(!atomic_read(&inode->i_readcount));
atomic_dec(&inode->i_readcount);
}
static inline void i_readcount_inc(struct inode *inode)
{
atomic_inc(&inode->i_readcount);
}
#else
static inline void i_readcount_dec(struct inode *inode)
{
return;
}
static inline void i_readcount_inc(struct inode *inode)
{
return;
}
#endif
extern int do_pipe_flags(int *, int);
extern struct file *create_read_pipe(struct file *f, int flags);
extern struct file *create_write_pipe(int flags);
extern void free_write_pipe(struct file *);
extern struct file *do_filp_open(int dfd, const char *pathname,
int open_flag, int mode, int acc_mode);
extern int may_open(struct path *, int, int);
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern struct file * open_exec(const char *);
@ -2225,6 +2241,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);

View file

@ -236,7 +236,7 @@ struct fscache_cache_ops {
/* unpin an object in the cache */
void (*unpin_object)(struct fscache_object *object);
/* store the updated auxilliary data on an object */
/* store the updated auxiliary data on an object */
void (*update_object)(struct fscache_object *object);
/* discard the resources pinned by an object and effect retirement if

View file

@ -102,9 +102,9 @@ struct fscache_cookie_def {
*/
void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
/* get the auxilliary data from netfs data
/* get the auxiliary data from netfs data
* - this function can be absent if the index carries no state data
* - should store the auxilliary data in the buffer
* - should store the auxiliary data in the buffer
* - should return the amount of data stored
* - not permitted to return an error
* - the netfs data from the cookie being used as the source is
@ -117,7 +117,7 @@ struct fscache_cookie_def {
/* consult the netfs about the state of an object
* - this function can be absent if the index carries no state data
* - the netfs data from the cookie being used as the target is
* presented, as is the auxilliary data
* presented, as is the auxiliary data
*/
enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
const void *data,

View file

@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
static inline int task_curr_ret_stack(struct task_struct *t)
{
@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)

View file

@ -37,7 +37,7 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
int lock_depth;
int padding;
};
#define FTRACE_MAX_EVENT \
@ -208,7 +208,6 @@ struct ftrace_event_call {
#define PERF_MAX_TRACE_SIZE 2048
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
extern void destroy_preds(struct ftrace_event_call *call);

View file

@ -109,7 +109,7 @@ struct hd_struct {
int make_it_fail;
#endif
unsigned long stamp;
int in_flight[2];
atomic_t in_flight[2];
#ifdef CONFIG_SMP
struct disk_stats __percpu *dkstats;
#else
@ -370,21 +370,21 @@ static inline void free_part_stats(struct hd_struct *part)
static inline void part_inc_in_flight(struct hd_struct *part, int rw)
{
part->in_flight[rw]++;
atomic_inc(&part->in_flight[rw]);
if (part->partno)
part_to_disk(part)->part0.in_flight[rw]++;
atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
}
static inline void part_dec_in_flight(struct hd_struct *part, int rw)
{
part->in_flight[rw]--;
atomic_dec(&part->in_flight[rw]);
if (part->partno)
part_to_disk(part)->part0.in_flight[rw]--;
atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
}
static inline int part_in_flight(struct hd_struct *part)
{
return part->in_flight[0] + part->in_flight[1];
return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]);
}
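
The point of the atomic_t conversion is that these counters can now be bumped without holding the queue lock; a hedged sketch of the accounting pattern (my_start_io/my_end_io are illustrative, not the block layer's actual call sites):

static void my_start_io(struct hd_struct *part, int rw) /* rw: 0 read, 1 write */
{
        part_inc_in_flight(part, rw);   /* atomic: no queue lock needed */
}

static void my_end_io(struct hd_struct *part, int rw)
{
        part_dec_in_flight(part, rw);
        if (!part_in_flight(part))
                pr_debug("partition idle\n"); /* both counters drained */
}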
static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)

View file

@ -35,6 +35,7 @@ struct vm_area_struct;
#define ___GFP_NOTRACK 0
#endif
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
/*
* GFP bitmasks..
@ -83,6 +84,7 @@ struct vm_area_struct;
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
/*
* This may seem redundant, but it's a way of annotating false positives vs.
@ -332,16 +334,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr);
struct vm_area_struct *vma, unsigned long addr,
int node);
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr) \
#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
alloc_pages_vma(gfp_mask, 0, vma, addr)
#define alloc_page_vma(gfp_mask, vma, addr) \
alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
alloc_pages_vma(gfp_mask, 0, vma, addr, node)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
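
A hedged sketch of the new node-aware variant, roughly how a transparent-hugepage-style fault path might prefer the faulting node (my_fault_alloc is hypothetical):

static struct page *my_fault_alloc(struct vm_area_struct *vma,
                                   unsigned long addr)
{
        /* honours @vma's mempolicy, preferring the current node */
        return alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, addr,
                                   numa_node_id());
}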

View file

@ -93,13 +93,6 @@
*/
#define in_nmi() (preempt_count() & NMI_MASK)
#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
# include <linux/sched.h>
# define PREEMPT_INATOMIC_BASE (current->lock_depth >= 0)
#else
# define PREEMPT_INATOMIC_BASE 0
#endif
#if defined(CONFIG_PREEMPT)
# define PREEMPT_CHECK_OFFSET 1
#else
@ -113,7 +106,7 @@
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
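
For illustration only, and bearing in mind the warning above against using it in driver code, core code can use in_atomic() as a debug assertion before sleeping; my_core_helper and my_mutex are hypothetical:

static DEFINE_MUTEX(my_mutex);

static void my_core_helper(void)
{
        WARN_ON_ONCE(in_atomic());      /* about to sleep; must not be atomic */
        mutex_lock(&my_mutex);
        mutex_unlock(&my_mutex);
}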
/*
* Check whether we were atomic before we did preempt_disable():

View file

@ -0,0 +1,29 @@
#ifndef __HID_ROCCAT_H
#define __HID_ROCCAT_H
/*
* Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/hid.h>
#include <linux/types.h>
#define ROCCATIOCGREPSIZE _IOR('H', 0xf1, int)
#ifdef __KERNEL__
int roccat_connect(struct class *klass, struct hid_device *hid,
int report_size);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data);
#endif
#endif
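
A minimal sketch of how a HID driver might use this interface; the class pointer, the 8-byte report size, and the drvdata plumbing are assumptions, not part of the patch:

static struct class *my_hid_class;      /* from class_create(), elsewhere */

static int my_probe(struct hid_device *hdev)
{
        int minor = roccat_connect(my_hid_class, hdev, 8); /* 8-byte events */

        if (minor < 0)
                return minor;
        hid_set_drvdata(hdev, (void *)(long)minor);
        return 0;
}

static void my_remove(struct hid_device *hdev)
{
        roccat_disconnect((long)hid_get_drvdata(hdev));
}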

View file

@ -504,6 +504,9 @@ struct hid_device { /* device report descriptor */
struct hid_usage *, __s32);
void (*hiddev_report_event) (struct hid_device *, struct hid_report *);
/* handler for raw input (Get_Report) data, used by hidraw */
int (*hid_get_raw_report) (struct hid_device *, unsigned char, __u8 *, size_t, unsigned char);
/* handler for raw output data, used by hidraw */
int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char);
@ -638,7 +641,7 @@ struct hid_driver {
struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max);
void (*feature_mapping)(struct hid_device *hdev,
struct hid_input *hidinput, struct hid_field *field,
struct hid_field *field,
struct hid_usage *usage);
#ifdef CONFIG_PM
int (*suspend)(struct hid_device *hdev, pm_message_t message);
@ -796,7 +799,7 @@ static inline int __must_check hid_parse(struct hid_device *hdev)
*
* Call this in the probe function *after* hid_parse. This will set up HW buffers
* and start the device (if not deferred to device open). hid_hw_stop must be
* called if this was successfull.
* called if this was successful.
*/
static inline int __must_check hid_hw_start(struct hid_device *hdev,
unsigned int connect_mask)

View file

@ -35,6 +35,9 @@ struct hidraw_devinfo {
#define HIDIOCGRAWINFO _IOR('H', 0x03, struct hidraw_devinfo)
#define HIDIOCGRAWNAME(len) _IOC(_IOC_READ, 'H', 0x04, len)
#define HIDIOCGRAWPHYS(len) _IOC(_IOC_READ, 'H', 0x05, len)
/* The first byte of SFEATURE and GFEATURE is the report number */
#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
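
From user space the new ioctls are used like this; the device path, report number, and payload layout are illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

static int roundtrip_feature(const char *dev)
{
        unsigned char buf[5] = { 0x02, 0x01, 0x00, 0x00, 0x00 };
        int fd = open(dev, O_RDWR), ret;

        if (fd < 0)
                return -1;
        /* buf[0] carries the report number (0x02), buf[1..] the payload */
        ret = ioctl(fd, HIDIOCSFEATURE(sizeof(buf)), buf);
        if (ret >= 0)
                ret = ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf);
        close(fd);
        return ret;
}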

View file

@ -101,7 +101,7 @@ int hp_sdc_dequeue_transaction(hp_sdc_transaction *this);
#define HP_SDC_STATUS_REG 0x40 /* Data from an i8042 register */
#define HP_SDC_STATUS_HILCMD 0x50 /* Command from HIL MLC */
#define HP_SDC_STATUS_HILDATA 0x60 /* Data from HIL MLC */
#define HP_SDC_STATUS_PUP 0x70 /* Sucessful power-up self test */
#define HP_SDC_STATUS_PUP 0x70 /* Successful power-up self test */
#define HP_SDC_STATUS_KCOOKED 0x80 /* Key from cooked kbd */
#define HP_SDC_STATUS_KRPG 0xc0 /* Key from Repeat Gen */
#define HP_SDC_STATUS_KMOD_SUP 0x10 /* Shift key is up */

View file

@ -54,11 +54,13 @@ enum hrtimer_restart {
* 0x00 inactive
* 0x01 enqueued into rbtree
* 0x02 callback function running
* 0x04 timer is migrated to another cpu
*
* Special cases:
* 0x03 callback function running and enqueued
* (was requeued on another CPU)
* 0x09 timer was migrated on CPU hotunplug
* 0x05 timer was migrated on CPU hotunplug
*
* The "callback function running and enqueued" status is only possible on
* SMP. It happens for example when a posix timer expired and the callback
* queued a signal. Between dropping the lock which protects the posix timer
@ -67,8 +69,11 @@ enum hrtimer_restart {
* as otherwise the timer could be removed before the softirq code finishes the
* handling of the timer.
*
* The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
* preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
* The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
* to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
* also affects HRTIMER_STATE_MIGRATE where the preservation is not
* necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
* enqueued on the new cpu.
*
* All state transitions are protected by cpu_base->lock.
*/
@ -148,7 +153,12 @@ struct hrtimer_clock_base {
#endif
};
#define HRTIMER_MAX_CLOCK_BASES 2
enum hrtimer_base_type {
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_BOOTTIME,
HRTIMER_MAX_CLOCK_BASES,
};
/*
* struct hrtimer_cpu_base - the per cpu clock bases
@ -308,6 +318,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
extern ktime_t ktime_get_boottime(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
@ -370,8 +381,9 @@ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
extern ktime_t hrtimer_get_next_event(void);
/*
* A timer is active, when it is enqueued into the rbtree or the callback
* function is running.
* A timer is active, when it is enqueued into the rbtree or the
* callback function is running or it's in the state of being migrated
* to another cpu.
*/
static inline int hrtimer_active(const struct hrtimer *timer)
{
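
A usage sketch: since "active" now also covers the migration window, callers can rely on it when deciding whether to re-arm; the timer and period are hypothetical and hrtimer_init() is assumed to have been done elsewhere:

static struct hrtimer my_timer;

static void my_rearm(void)
{
        /* also true while the timer is being migrated to another cpu */
        if (!hrtimer_active(&my_timer))
                hrtimer_start(&my_timer,
                              ktime_set(0, 100 * NSEC_PER_MSEC),
                              HRTIMER_MODE_REL);
}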

View file

@ -117,7 +117,7 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long end,
long adjust_next)
{
if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
if (!vma->anon_vma || vma->vm_ops)
return;
__vma_adjust_trans_huge(vma, start, end, adjust_next);
}

include/linux/hwspinlock.h (new file, 292 lines)
View file

@ -0,0 +1,292 @@
/*
* Hardware spinlock public header
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H
#include <linux/err.h>
#include <linux/sched.h>
/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
struct hwspinlock;
#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
int hwspin_lock_register(struct hwspinlock *lock);
struct hwspinlock *hwspin_lock_unregister(unsigned int id);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
#else /* !CONFIG_HWSPINLOCK */
/*
* We don't want these functions to fail if CONFIG_HWSPINLOCK is not
* enabled. We prefer to silently succeed in this case, and let the
* code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
* required on a given setup, code that uses this API will still work.
*
* The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
* we _do_ want users to fail (no point in registering hwspinlock instances if
* the framework is not available).
*
* Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
* users. Callers that do care can still check this with IS_ERR.
*/
static inline struct hwspinlock *hwspin_lock_request(void)
{
return ERR_PTR(-ENODEV);
}
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
return ERR_PTR(-ENODEV);
}
static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
return 0;
}
static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
int mode, unsigned long *flags)
{
return 0;
}
static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
return 0;
}
static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
return 0;
}
static inline int hwspin_lock_register(struct hwspinlock *hwlock)
{
return -ENODEV;
}
static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
{
return NULL;
}
#endif /* !CONFIG_HWSPINLOCK */
/**
* hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
* @hwlock: an hwspinlock which we want to trylock
* @flags: a pointer to where the caller's interrupt state will be saved
*
* This function attempts to lock the underlying hwspinlock, and will
* immediately fail if the hwspinlock is already locked.
*
* Upon a successful return from this function, preemption and local
* interrupts are disabled (previous interrupts state is saved at @flags),
* so the caller must not sleep, and is advised to release the hwspinlock
* as soon as possible.
*
* Returns 0 if we successfully locked the hwspinlock, -EBUSY if
* the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
*/
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
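
For example, a hedged sketch pairing it with hwspin_unlock_irqrestore(); the shared-memory mailbox is illustrative:

static int my_post(struct hwspinlock *hwlock, u32 *shared, u32 val)
{
        unsigned long flags;
        int ret;

        ret = hwspin_trylock_irqsave(hwlock, &flags);
        if (ret)
                return ret;             /* -EBUSY: a remote core holds it */

        *shared = val;                  /* keep the critical section short */

        hwspin_unlock_irqrestore(hwlock, &flags);
        return 0;
}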
/**
* hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
* @hwlock: an hwspinlock which we want to trylock
*
* This function attempts to lock the underlying hwspinlock, and will
* immediately fail if the hwspinlock is already locked.
*
* Upon a successful return from this function, preemption and local
* interrupts are disabled, so the caller must not sleep, and is advised
* to release the hwspinlock as soon as possible.
*
* Returns 0 if we successfully locked the hwspinlock, -EBUSY if
* the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
*/
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}
/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
* This function attempts to lock an hwspinlock, and will immediately fail
* if the hwspinlock is already taken.
*
* Upon a successful return from this function, preemption is disabled,
* so the caller must not sleep, and is advised to release the hwspinlock
* as soon as possible. This is required in order to minimize remote cores
* polling on the hardware interconnect.
*
* Returns 0 if we successfully locked the hwspinlock, -EBUSY if
* the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
*/
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
return __hwspin_trylock(hwlock, 0, NULL);
}
/**
* hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
* @flags: a pointer to where the caller's interrupt state will be saved
*
* This function locks the underlying @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up when @to msecs have elapsed.
*
* Upon a successful return from this function, preemption and local interrupts
* are disabled (plus previous interrupt state is saved), so the caller must
* not sleep, and is advised to release the hwspinlock as soon as possible.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
* busy after @to msecs). The function will never sleep.
*/
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
unsigned int to, unsigned long *flags)
{
return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
/**
* hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
*
* This function locks the underlying @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up when @to msecs have elapsed.
*
* Upon a successful return from this function, preemption and local interrupts
* are disabled so the caller must not sleep, and is advised to release the
* hwspinlock as soon as possible.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
* busy after @to msecs). The function will never sleep.
*/
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}
/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
*
* This function locks the underlying @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up when @to msecs have elapsed.
*
* Upon a successful return from this function, preemption is disabled
* so the caller must not sleep, and is advised to release the hwspinlock
* as soon as possible.
* This is required in order to minimize remote cores polling on the
* hardware interconnect.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
* busy after @to msecs). The function will never sleep.
*/
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
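
Putting the pieces together, a hedged end-to-end sketch; the 100 msecs budget is illustrative and error handling is trimmed:

static int my_sync_with_remote(void)
{
        struct hwspinlock *hwlock;
        int ret;

        hwlock = hwspin_lock_request(); /* or hwspin_lock_request_specific(id) */
        if (IS_ERR_OR_NULL(hwlock))
                return -ENODEV;

        ret = hwspin_lock_timeout(hwlock, 100); /* busy-wait up to 100 msecs */
        if (!ret) {
                /* critical section: remote cores may be spinning, be brief */
                hwspin_unlock(hwlock);
        }
        hwspin_lock_free(hwlock);
        return ret;
}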
/**
* hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
* @hwlock: a previously-acquired hwspinlock which we want to unlock
* @flags: previous caller's interrupt state to restore
*
* This function will unlock a specific hwspinlock, enable preemption and
* restore the previous state of the local interrupts. It should be used
* to undo, e.g., hwspin_trylock_irqsave().
*
* @hwlock must be already locked before calling this function: it is a bug
* to call unlock on a @hwlock that is already unlocked.
*/
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
unsigned long *flags)
{
__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}
/**
* hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
* This function will unlock a specific hwspinlock, enable preemption and
* enable local interrupts. Should be used to undo hwspin_lock_irq().
*
* @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
* calling this function: it is a bug to call unlock on a @hwlock that is
* already unlocked.
*/
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}
/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
* This function will unlock a specific hwspinlock and re-enable preemption.
*
* @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
* this function: it is a bug to call unlock on a @hwlock that is already
* unlocked.
*/
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
__hwspin_unlock(hwlock, 0, NULL);
}
#endif /* __LINUX_HWSPINLOCK_H */

View file

@ -1,37 +0,0 @@
/* ------------------------------------------------------------------------- */
/* */
/* i2c-id.h - identifier values for i2c drivers and adapters */
/* */
/* ------------------------------------------------------------------------- */
/* Copyright (C) 1995-1999 Simon G. Vogl
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
/* ------------------------------------------------------------------------- */
#ifndef LINUX_I2C_ID_H
#define LINUX_I2C_ID_H
/* Please note that I2C driver IDs are optional. They are only needed if a
legacy chip driver needs to identify a bus or a bus driver needs to
identify a legacy client. If you don't need them, just don't set them. */
/*
* ---- Adapter types ----------------------------------------------------
*/
/* --- Bit algorithm adapters */
#define I2C_HW_B_CX2388x 0x01001b /* Conexant 2388x based TV cards */
#endif /* LINUX_I2C_ID_H */

include/linux/i2c-tegra.h (new file, 25 lines)
View file

@ -0,0 +1,25 @@
/*
* drivers/i2c/busses/i2c-tegra.c
*
* Copyright (C) 2010 Google, Inc.
* Author: Colin Cross <ccross@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_I2C_TEGRA_H
#define _LINUX_I2C_TEGRA_H
struct tegra_i2c_platform_data {
unsigned long bus_clk_rate;
};
#endif /* _LINUX_I2C_TEGRA_H */
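
Board code hands this to the controller as ordinary platform data; a hedged sketch in which the "tegra-i2c" device name and the 400 kHz rate are assumptions:

static struct tegra_i2c_platform_data my_i2c_pdata = {
        .bus_clk_rate   = 400000,       /* 400 kHz fast mode */
};

static struct platform_device my_i2c_dev = {
        .name           = "tegra-i2c",  /* assumed to match the bus driver */
        .id             = 0,
        .dev            = {
                .platform_data  = &my_i2c_pdata,
        },
};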

View file

@ -29,7 +29,6 @@
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/i2c-id.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
@ -105,8 +104,8 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @attach_adapter: Callback for bus addition (for legacy drivers)
* @detach_adapter: Callback for bus removal (for legacy drivers)
* @attach_adapter: Callback for bus addition (deprecated)
* @detach_adapter: Callback for bus removal (deprecated)
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
@ -144,11 +143,11 @@ struct i2c_driver {
unsigned int class;
/* Notifies the driver that a new bus has appeared or is about to be
* removed. You should avoid using this if you can, it will probably
* be removed in a near future.
* removed. You should avoid using this, it will be removed in a
* near future.
*/
int (*attach_adapter)(struct i2c_adapter *);
int (*detach_adapter)(struct i2c_adapter *);
int (*attach_adapter)(struct i2c_adapter *) __deprecated;
int (*detach_adapter)(struct i2c_adapter *) __deprecated;
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
@ -258,9 +257,7 @@ struct i2c_board_info {
unsigned short addr;
void *platform_data;
struct dev_archdata *archdata;
#ifdef CONFIG_OF
struct device_node *of_node;
#endif
int irq;
};
@ -356,7 +353,6 @@ struct i2c_algorithm {
*/
struct i2c_adapter {
struct module *owner;
unsigned int id __deprecated;
unsigned int class; /* classes to allow probing for */
const struct i2c_algorithm *algo; /* the algorithm to access the bus */
void *algo_data;
@ -398,6 +394,8 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
return NULL;
}
int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
/* Adapter locking functions, exported for shared pin cases */
void i2c_lock_adapter(struct i2c_adapter *);
void i2c_unlock_adapter(struct i2c_adapter *);
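
The new i2c_for_each_dev() walks every device registered on the I2C bus type with a callback; a minimal sketch that counts them (my_count_cb is hypothetical):

static int my_count_cb(struct device *dev, void *data)
{
        (*(int *)data)++;
        return 0;                       /* non-zero would stop the walk */
}

static int my_count_i2c_devices(void)
{
        int count = 0;

        i2c_for_each_dev(&count, my_count_cb);
        return count;
}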
@ -449,7 +447,7 @@ extern void i2c_release_client(struct i2c_client *client);
extern void i2c_clients_command(struct i2c_adapter *adap,
unsigned int cmd, void *arg);
extern struct i2c_adapter *i2c_get_adapter(int id);
extern struct i2c_adapter *i2c_get_adapter(int nr);
extern void i2c_put_adapter(struct i2c_adapter *adap);

View file

@ -0,0 +1,36 @@
/*
* Platform Data for ADS1015 12-bit 4-input ADC
* (C) Copyright 2010
* Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef LINUX_ADS1015_H
#define LINUX_ADS1015_H
#define ADS1015_CHANNELS 8
struct ads1015_channel_data {
bool enabled;
unsigned int pga;
unsigned int data_rate;
};
struct ads1015_platform_data {
struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
};
#endif /* LINUX_ADS1015_H */
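
Board code enables the channels it wires up and attaches the structure through i2c_board_info; the gain/data-rate codes and the 0x48 address below are illustrative:

static struct ads1015_platform_data my_ads1015 = {
        .channel_data = {
                [0] = { .enabled = true, .pga = 2, .data_rate = 4 },
                [4] = { .enabled = true, .pga = 0, .data_rate = 7 },
        },
};

static struct i2c_board_info my_adc_info __initdata = {
        I2C_BOARD_INFO("ads1015", 0x48),
        .platform_data = &my_ads1015,
};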

View file

@ -1,5 +1,5 @@
/*
* AT42QT602240/ATMXT224 Touchscreen driver
* Atmel maXTouch Touchscreen driver
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
@ -10,21 +10,26 @@
* option) any later version.
*/
#ifndef __LINUX_QT602240_TS_H
#define __LINUX_QT602240_TS_H
#ifndef __LINUX_ATMEL_MXT_TS_H
#define __LINUX_ATMEL_MXT_TS_H
#include <linux/types.h>
/* Orient */
#define QT602240_NORMAL 0x0
#define QT602240_DIAGONAL 0x1
#define QT602240_HORIZONTAL_FLIP 0x2
#define QT602240_ROTATED_90_COUNTER 0x3
#define QT602240_VERTICAL_FLIP 0x4
#define QT602240_ROTATED_90 0x5
#define QT602240_ROTATED_180 0x6
#define QT602240_DIAGONAL_COUNTER 0x7
#define MXT_NORMAL 0x0
#define MXT_DIAGONAL 0x1
#define MXT_HORIZONTAL_FLIP 0x2
#define MXT_ROTATED_90_COUNTER 0x3
#define MXT_VERTICAL_FLIP 0x4
#define MXT_ROTATED_90 0x5
#define MXT_ROTATED_180 0x6
#define MXT_DIAGONAL_COUNTER 0x7
/* The platform data for the Atmel maXTouch touchscreen driver */
struct mxt_platform_data {
const u8 *config;
size_t config_length;
/* The platform data for the AT42QT602240/ATMXT224 touchscreen driver */
struct qt602240_platform_data {
unsigned int x_line;
unsigned int y_line;
unsigned int x_size;
@ -33,6 +38,7 @@ struct qt602240_platform_data {
unsigned int threshold;
unsigned int voltage;
unsigned char orient;
unsigned long irqflags;
};
#endif /* __LINUX_QT602240_TS_H */
#endif /* __LINUX_ATMEL_MXT_TS_H */
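
Under the new names, board code looks roughly like this; the geometry, threshold, 0x4a address, and IRQ trigger are illustrative values, not driver defaults:

static struct mxt_platform_data my_mxt_pdata = {
        .x_line         = 19,
        .y_line         = 11,
        .x_size         = 800,
        .y_size         = 480,
        .threshold      = 0x28,
        .voltage        = 2800000,      /* 2.8 V, in uV */
        .orient         = MXT_DIAGONAL,
        .irqflags       = IRQF_TRIGGER_FALLING, /* <linux/interrupt.h> */
};

static struct i2c_board_info my_mxt_info __initdata = {
        I2C_BOARD_INFO("atmel_mxt_ts", 0x4a),
        .platform_data  = &my_mxt_pdata,
};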

View file

@ -0,0 +1,14 @@
#ifndef _LINUX_MAX6639_H
#define _LINUX_MAX6639_H
#include <linux/types.h>
/* platform data for the MAX6639 temperature sensor and fan control */
struct max6639_platform_data {
bool pwm_polarity; /* Polarity low (0) or high (1, default) */
int ppr; /* Pulses per rotation 1..4 (default == 2) */
int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
};
#endif /* _LINUX_MAX6639_H */
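
A hedged sketch of a board-level instantiation; the values mirror the defaults documented in the comments above:

static struct max6639_platform_data my_max6639_pdata = {
        .pwm_polarity   = 1,            /* active-high PWM (default) */
        .ppr            = 2,            /* two pulses per rotation */
        .rpm_range      = 4000,
};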

View file

@ -18,6 +18,7 @@
#define MCS_KEY_CODE(v) ((v) & 0xffff)
struct mcs_platform_data {
void (*poweron)(bool);
void (*cfg_pin)(void);
/* touchscreen */

Some files were not shown because too many files have changed in this diff.