Merge branch 'acpi-resources'

* acpi-resources: (23 commits)
  Merge branch 'pci/host-generic' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci into acpi-resources
  x86/irq, ACPI: Implement ACPI driver to support IOAPIC hotplug
  ACPI: Add interfaces to parse IOAPIC ID for IOAPIC hotplug
  x86/PCI: Refine the way to release PCI IRQ resources
  x86/PCI/ACPI: Use common ACPI resource interfaces to simplify implementation
  x86/PCI: Fix the range check for IO resources
  PCI: Use common resource list management code instead of private implementation
  resources: Move struct resource_list_entry from ACPI into resource core
  ACPI: Introduce helper function acpi_dev_filter_resource_type()
  ACPI: Add field offset to struct resource_list_entry
  ACPI: Translate resource into master side address for bridge window resources
  ACPI: Return translation offset when parsing ACPI address space resources
  ACPI: Enforce stricter checks for address space descriptors
  ACPI: Set flag IORESOURCE_UNSET for unassigned resources
  ACPI: Normalize return value of resource parser functions
  ACPI: Fix a bug in parsing ACPI Memory24 resource
  ACPI: Add prefetch decoding to the address space parser
  ACPI: Move the window flag logic to the combined parser
  ACPI: Unify the parsing of address_space and ext_address_space
  ACPI: Let the parser return false for disabled resources
  ...
Rafael J. Wysocki 2015-02-10 16:05:16 +01:00
commit 8fbcf5ecb3
30 changed files with 879 additions and 456 deletions
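
For context on the pci_acpi_scan_root() rework in the diff below: the old count_resource()/setup_resource() pair, which filled a private array in struct pci_root_info, is replaced by a generic resource list built with acpi_dev_get_resources(), and validate_resources() then clips each host bridge window to the CPU-addressable range and merges overlapping windows before inserting them. The standalone sketch below mirrors only that clip-and-merge step; struct window and clip_and_merge() are simplified stand-ins for illustration, not the kernel's struct resource or resource_entry list.

/*
 * Standalone sketch of the clip-and-merge step performed by
 * validate_resources() in the diff below.  The types here are
 * simplified stand-ins, not the kernel's struct resource/resource_entry.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct window {
	uint64_t start, end;	/* inclusive range */
	bool valid;
};

static bool overlaps(const struct window *a, const struct window *b)
{
	return a->start <= b->end && b->start <= a->end;
}

static void clip_and_merge(struct window *w, size_t n, uint64_t root_end)
{
	for (size_t i = 0; i < n; i++) {
		if (!w[i].valid)
			continue;

		/* Clip away the portion the CPU cannot address. */
		if (w[i].end > root_end)
			w[i].end = root_end;
		if (w[i].end <= w[i].start) {
			w[i].valid = false;	/* whole window unreachable */
			continue;
		}

		/* Fold this window into a later one it overlaps with. */
		for (size_t j = i + 1; j < n; j++) {
			if (!w[j].valid || !overlaps(&w[i], &w[j]))
				continue;
			w[j].start = w[j].start < w[i].start ? w[j].start : w[i].start;
			w[j].end   = w[j].end   > w[i].end   ? w[j].end   : w[i].end;
			w[i].valid = false;	/* i was absorbed by j */
			break;
		}
	}
}

int main(void)
{
	struct window w[] = {
		{ 0x1000,      0x1fff,      true },
		{ 0x1800,      0x2fff,      true },	/* overlaps the first */
		{ 0xfffff0000, 0xffffffff0, true },	/* above the root range */
	};

	clip_and_merge(w, 3, 0xffffffff);
	for (size_t i = 0; i < 3; i++)
		if (w[i].valid)
			printf("window [%#llx-%#llx]\n",
			       (unsigned long long)w[i].start,
			       (unsigned long long)w[i].end);
	return 0;
}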

@@ -93,8 +93,6 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
extern bool mp_should_keep_irq(struct device *dev);
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);

@@ -10,9 +10,6 @@
struct pci_root_info {
struct acpi_device *bridge;
char name[16];
unsigned int res_num;
struct resource *res;
resource_size_t *res_offset;
struct pci_sysdata sd;
#ifdef CONFIG_PCI_MMCONFIG
bool mcfg_added;
@@ -218,130 +215,41 @@ static void teardown_mcfg_map(struct pci_root_info *info)
}
#endif
static acpi_status resource_to_addr(struct acpi_resource *resource,
struct acpi_resource_address64 *addr)
static void validate_resources(struct device *dev, struct list_head *crs_res,
unsigned long type)
{
acpi_status status;
struct acpi_resource_memory24 *memory24;
struct acpi_resource_memory32 *memory32;
struct acpi_resource_fixed_memory32 *fixed_memory32;
LIST_HEAD(list);
struct resource *res1, *res2, *root = NULL;
struct resource_entry *tmp, *entry, *entry2;
memset(addr, 0, sizeof(*addr));
switch (resource->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &resource->data.memory24;
addr->resource_type = ACPI_MEMORY_RANGE;
addr->address.minimum = memory24->minimum;
addr->address.address_length = memory24->address_length;
addr->address.maximum = addr->address.minimum + addr->address.address_length - 1;
return AE_OK;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &resource->data.memory32;
addr->resource_type = ACPI_MEMORY_RANGE;
addr->address.minimum = memory32->minimum;
addr->address.address_length = memory32->address_length;
addr->address.maximum = addr->address.minimum + addr->address.address_length - 1;
return AE_OK;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &resource->data.fixed_memory32;
addr->resource_type = ACPI_MEMORY_RANGE;
addr->address.minimum = fixed_memory32->address;
addr->address.address_length = fixed_memory32->address_length;
addr->address.maximum = addr->address.minimum + addr->address.address_length - 1;
return AE_OK;
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
status = acpi_resource_to_address64(resource, addr);
if (ACPI_SUCCESS(status) &&
(addr->resource_type == ACPI_MEMORY_RANGE ||
addr->resource_type == ACPI_IO_RANGE) &&
addr->address.address_length > 0) {
return AE_OK;
}
break;
}
return AE_ERROR;
}
BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;
static acpi_status count_resource(struct acpi_resource *acpi_res, void *data)
{
struct pci_root_info *info = data;
struct acpi_resource_address64 addr;
acpi_status status;
list_splice_init(crs_res, &list);
resource_list_for_each_entry_safe(entry, tmp, &list) {
bool free = false;
resource_size_t end;
status = resource_to_addr(acpi_res, &addr);
if (ACPI_SUCCESS(status))
info->res_num++;
return AE_OK;
}
static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
{
struct pci_root_info *info = data;
struct resource *res;
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags;
u64 start, orig_end, end;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
return AE_OK;
if (addr.resource_type == ACPI_MEMORY_RANGE) {
flags = IORESOURCE_MEM;
if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
flags |= IORESOURCE_PREFETCH;
} else if (addr.resource_type == ACPI_IO_RANGE) {
flags = IORESOURCE_IO;
} else
return AE_OK;
start = addr.address.minimum + addr.address.translation_offset;
orig_end = end = addr.address.maximum + addr.address.translation_offset;
/* Exclude non-addressable range or non-addressable portion of range */
end = min(end, (u64)iomem_resource.end);
if (end <= start) {
dev_info(&info->bridge->dev,
"host bridge window [%#llx-%#llx] "
"(ignored, not CPU addressable)\n", start, orig_end);
return AE_OK;
} else if (orig_end != end) {
dev_info(&info->bridge->dev,
"host bridge window [%#llx-%#llx] "
"([%#llx-%#llx] ignored, not CPU addressable)\n",
start, orig_end, end + 1, orig_end);
}
res = &info->res[info->res_num];
res->name = info->name;
res->flags = flags;
res->start = start;
res->end = end;
info->res_offset[info->res_num] = addr.address.translation_offset;
info->res_num++;
if (!pci_use_crs)
dev_printk(KERN_DEBUG, &info->bridge->dev,
"host bridge window %pR (ignored)\n", res);
return AE_OK;
}
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
int i, j;
struct resource *res1, *res2;
for (i = 0; i < info->res_num; i++) {
res1 = &info->res[i];
res1 = entry->res;
if (!(res1->flags & type))
continue;
goto next;
for (j = i + 1; j < info->res_num; j++) {
res2 = &info->res[j];
/* Exclude non-addressable range or non-addressable portion */
end = min(res1->end, root->end);
if (end <= res1->start) {
dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
res1);
free = true;
goto next;
} else if (res1->end != end) {
dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
res1, (unsigned long long)end + 1,
(unsigned long long)res1->end);
res1->end = end;
}
resource_list_for_each_entry(entry2, crs_res) {
res2 = entry2->res;
if (!(res2->flags & type))
continue;
@@ -353,118 +261,92 @@ static void coalesce_windows(struct pci_root_info *info, unsigned long type)
if (resource_overlaps(res1, res2)) {
res2->start = min(res1->start, res2->start);
res2->end = max(res1->end, res2->end);
dev_info(&info->bridge->dev,
"host bridge window expanded to %pR; %pR ignored\n",
dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
res2, res1);
res1->flags = 0;
free = true;
goto next;
}
}
next:
resource_list_del(entry);
if (free)
resource_list_free_entry(entry);
else
resource_list_add_tail(entry, crs_res);
}
}
static void add_resources(struct pci_root_info *info,
struct list_head *resources)
struct list_head *resources,
struct list_head *crs_res)
{
int i;
struct resource *res, *root, *conflict;
struct resource_entry *entry, *tmp;
struct resource *res, *conflict, *root = NULL;
coalesce_windows(info, IORESOURCE_MEM);
coalesce_windows(info, IORESOURCE_IO);
for (i = 0; i < info->res_num; i++) {
res = &info->res[i];
validate_resources(&info->bridge->dev, crs_res, IORESOURCE_MEM);
validate_resources(&info->bridge->dev, crs_res, IORESOURCE_IO);
resource_list_for_each_entry_safe(entry, tmp, crs_res) {
res = entry->res;
if (res->flags & IORESOURCE_MEM)
root = &iomem_resource;
else if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
continue;
BUG_ON(res);
conflict = insert_resource_conflict(root, res);
if (conflict)
if (conflict) {
dev_info(&info->bridge->dev,
"ignoring host bridge window %pR (conflicts with %s %pR)\n",
res, conflict->name, conflict);
else
pci_add_resource_offset(resources, res,
info->res_offset[i]);
}
}
static void free_pci_root_info_res(struct pci_root_info *info)
{
kfree(info->res);
info->res = NULL;
kfree(info->res_offset);
info->res_offset = NULL;
info->res_num = 0;
}
static void __release_pci_root_info(struct pci_root_info *info)
{
int i;
struct resource *res;
for (i = 0; i < info->res_num; i++) {
res = &info->res[i];
if (!res->parent)
continue;
if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
continue;
release_resource(res);
resource_list_destroy_entry(entry);
}
}
free_pci_root_info_res(info);
teardown_mcfg_map(info);
kfree(info);
list_splice_tail(crs_res, resources);
}
static void release_pci_root_info(struct pci_host_bridge *bridge)
{
struct resource *res;
struct resource_entry *entry;
struct pci_root_info *info = bridge->release_data;
__release_pci_root_info(info);
resource_list_for_each_entry(entry, &bridge->windows) {
res = entry->res;
if (res->parent &&
(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
release_resource(res);
}
teardown_mcfg_map(info);
kfree(info);
}
static void probe_pci_root_info(struct pci_root_info *info,
struct acpi_device *device,
int busnum, int domain)
int busnum, int domain,
struct list_head *list)
{
size_t size;
int ret;
struct resource_entry *entry;
sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
info->bridge = device;
info->res_num = 0;
acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
info);
if (!info->res_num)
return;
size = sizeof(*info->res) * info->res_num;
info->res = kzalloc_node(size, GFP_KERNEL, info->sd.node);
if (!info->res) {
info->res_num = 0;
return;
}
size = sizeof(*info->res_offset) * info->res_num;
info->res_num = 0;
info->res_offset = kzalloc_node(size, GFP_KERNEL, info->sd.node);
if (!info->res_offset) {
kfree(info->res);
info->res = NULL;
return;
}
acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
info);
ret = acpi_dev_get_resources(device, list,
acpi_dev_filter_resource_type_cb,
(void *)(IORESOURCE_IO | IORESOURCE_MEM));
if (ret < 0)
dev_warn(&device->dev,
"failed to parse _CRS method, error code %d\n", ret);
else if (ret == 0)
dev_dbg(&device->dev,
"no IO and memory resources present in _CRS\n");
else
resource_list_for_each_entry(entry, list)
entry->res->name = info->name;
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
@@ -473,6 +355,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
struct pci_root_info *info;
int domain = root->segment;
int busnum = root->secondary.start;
struct resource_entry *res_entry;
LIST_HEAD(crs_res);
LIST_HEAD(resources);
struct pci_bus *bus;
struct pci_sysdata *sd;
@@ -520,18 +404,22 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
memcpy(bus->sysdata, sd, sizeof(*sd));
kfree(info);
} else {
probe_pci_root_info(info, device, busnum, domain);
/* insert busn res at first */
pci_add_resource(&resources, &root->secondary);
/*
* _CRS with no apertures is normal, so only fall back to
* defaults or native bridge info if we're ignoring _CRS.
*/
if (pci_use_crs)
add_resources(info, &resources);
else {
free_pci_root_info_res(info);
probe_pci_root_info(info, device, busnum, domain, &crs_res);
if (pci_use_crs) {
add_resources(info, &resources, &crs_res);
} else {
resource_list_for_each_entry(res_entry, &crs_res)
dev_printk(KERN_DEBUG, &device->dev,
"host bridge window %pR (ignored)\n",
res_entry->res);
resource_list_free(&crs_res);
x86_pci_root_bus_resources(busnum, &resources);
}
@@ -546,8 +434,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
to_pci_host_bridge(bus->bridge),
release_pci_root_info, info);
} else {
pci_free_resource_list(&resources);
__release_pci_root_info(info);
resource_list_free(&resources);
teardown_mcfg_map(info);
kfree(info);
}
}

@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
{
struct pci_root_info *info = x86_find_pci_root_info(bus);
struct pci_root_res *root_res;
struct pci_host_bridge_window *window;
struct resource_entry *window;
bool found = false;
if (!info)
@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
bus);
/* already added by acpi ? */
list_for_each_entry(window, resources, list)
resource_list_for_each_entry(window, resources)
if (window->res->flags & IORESOURCE_BUS) {
found = true;
break;

@@ -513,6 +513,31 @@ void __init pcibios_set_cache_line_size(void)
}
}
/*
* Some device drivers assume dev->irq won't change after calling
* pci_disable_device(). So delay releasing of IRQ resource to driver
* unbinding time. Otherwise it will break PM subsystem and drivers
* like xen-pciback etc.
*/
static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct pci_dev *dev = to_pci_dev(data);
if (action != BUS_NOTIFY_UNBOUND_DRIVER)
return NOTIFY_DONE;
if (pcibios_disable_irq)
pcibios_disable_irq(dev);
return NOTIFY_OK;
}
static struct notifier_block pci_irq_nb = {
.notifier_call = pci_irq_notifier,
.priority = INT_MIN,
};
int __init pcibios_init(void)
{
if (!raw_pci_ops) {
@@ -525,6 +550,9 @@ int __init pcibios_init(void)
if (pci_bf_sort >= pci_force_bf)
pci_sort_breadthfirst();
bus_register_notifier(&pci_bus_type, &pci_irq_nb);
return 0;
}
@@ -683,12 +711,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return 0;
}
void pcibios_disable_device (struct pci_dev *dev)
{
if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
pcibios_disable_irq(dev);
}
int pci_ext_cfg_avail(void)
{
if (raw_pci_ext_ops)
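
The pci_irq_notifier() addition above moves IRQ teardown out of pcibios_disable_device() and into a bus notifier that reacts only to BUS_NOTIFY_UNBOUND_DRIVER, so dev->irq stays stable across pci_disable_device(), as the comment explains. The standalone sketch below illustrates that defer-until-unbind pattern with a hypothetical miniature notifier chain; it is not the kernel's notifier API.

/*
 * Rough standalone illustration of deferring a cleanup action to an
 * "unbound" notification, mirroring the intent of pci_irq_notifier()
 * above.  The notifier chain here is a hypothetical miniature, not the
 * kernel's notifier API.
 */
#include <stdio.h>

enum bus_event { BOUND_DRIVER, UNBOUND_DRIVER };

struct device { int irq; int irq_managed; };

typedef void (*notifier_fn)(enum bus_event, struct device *);

#define MAX_NOTIFIERS 4
static notifier_fn notifiers[MAX_NOTIFIERS];
static int n_notifiers;

static void bus_register_notifier(notifier_fn fn)
{
	if (n_notifiers < MAX_NOTIFIERS)
		notifiers[n_notifiers++] = fn;
}

static void bus_notify(enum bus_event ev, struct device *dev)
{
	for (int i = 0; i < n_notifiers; i++)
		notifiers[i](ev, dev);
}

/* Release the IRQ only when the driver is finally unbound. */
static void irq_notifier(enum bus_event ev, struct device *dev)
{
	if (ev != UNBOUND_DRIVER)
		return;
	if (dev->irq_managed && dev->irq > 0) {
		printf("releasing IRQ %d at unbind time\n", dev->irq);
		dev->irq = 0;
		dev->irq_managed = 0;
	}
}

int main(void)
{
	struct device dev = { .irq = 17, .irq_managed = 1 };

	bus_register_notifier(irq_notifier);

	/* Disable path: the IRQ is deliberately left untouched. */
	printf("device disabled, irq still %d\n", dev.irq);

	/* Driver unbind: now the IRQ resource is actually released. */
	bus_notify(UNBOUND_DRIVER, &dev);
	return 0;
}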

@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
dev->irq > 0) {
if (dev->irq_managed && dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
dev->irq = 0;
}
}

@@ -1256,22 +1256,9 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
bool mp_should_keep_irq(struct device *dev)
{
if (dev->power.is_prepared)
return true;
#ifdef CONFIG_PM
if (dev->power.runtime_status == RPM_SUSPENDING)
return true;
#endif
return false;
}
static void pirq_disable_irq(struct pci_dev *dev)
{
if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
dev->irq_managed && dev->irq) {
if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
dev->irq = 0;
dev->irq_managed = 0;