This is the 5.10.112 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmJftPsACgkQONu9yGCS
 aT6y3w//Vmd+BesdtAOmPS333kwZb1d4o7GginMjLof6TYh4T+uYovySJVU21PvY
 fQtTjEa0VI3kidrwbW/zCG5IGskWYynz/lJNWeTQ57zEtOJTXhETgUmeT5WG+rr7
 oxMZV7zXunZariXk9FVdxjQywSdV82brcSoEbK+CpWhaJW4H3UrWbM0HDF/en/9T
 9AcnIJ3o5t3O+BDWd7VTqNhptLk3/PHS8W3vEFOo6ptFJYUzXgiZdc5YYmuNlDy+
 +84PC19DDE0sd9mX7Pl0eFB0lc6nkpEciq/nFUtygLsJihZajIaIeK6Sa+iewfgc
 6U+zBRAwocv8wq2lbzrXJXg5TpPhQ6pJbOlcdwU7MfmsuzTk3m3TXLo4x1SH21wW
 aztNPNrAVly/DphyvvU1QpqyoMiF+al5zbCifDOpEgi4tenakQD3QDDcn5FfvYjw
 5IPCtsZQ9fAAgwtpQMzyCmHc9Y4LAhPBDFC7thh2iW9kO5RlWxSBuedgeoIMne6p
 Zf8iKKcVE47y/c5Q8MB4h+qOZU6k5VQSjK6A+AtdCcHhNQOWAz8kOsK4Fe0jADqP
 okdjvV8qtga0/O7PsKMYvxce4eqKgAN3f3mFT4nF+fQNTBLiML+UblRTyi8CVYdi
 /MK6ulzBIk6Ch4qewwTsHlbeHGd882sS9pLakpFyqW3RywMnOBA=
 =2Ugm
 -----END PGP SIGNATURE-----

Merge 5.10.112 into android12-5.10-lts

Changes in 5.10.112
	drm/amdkfd: Use drm_priv to pass VM from KFD to amdgpu
	hamradio: defer 6pack kfree after unregister_netdev
	hamradio: remove needs_free_netdev to avoid UAF
	cpuidle: PSCI: Move the `has_lpi` check to the beginning of the function
	ACPI: processor idle: Check for architectural support for LPI
	btrfs: remove unused variable in btrfs_{start,write}_dirty_block_groups()
	drm/msm: Add missing put_task_struct() in debugfs path
	memory: atmel-ebi: Fix missing of_node_put in atmel_ebi_probe
	firmware: arm_scmi: Fix sorting of retrieved clock rates
	media: rockchip/rga: do proper error checking in probe
	SUNRPC: Fix the svc_deferred_event trace class
	net/sched: flower: fix parsing of ethertype following VLAN header
	veth: Ensure eth header is in skb's linear part
	gpiolib: acpi: use correct format characters
	net: mdio: Alphabetically sort header inclusion
	mlxsw: i2c: Fix initialization error flow
	net/sched: fix initialization order when updating chain 0 head
	net: dsa: felix: suppress -EPROBE_DEFER errors
	net: ethernet: stmmac: fix altr_tse_pcs function when using a fixed-link
	net/sched: taprio: Check if socket flags are valid
	cfg80211: hold bss_lock while updating nontrans_list
	drm/msm: Fix range size vs end confusion
	drm/msm/dsi: Use connector directly in msm_dsi_manager_connector_init()
	net/smc: Fix NULL pointer dereference in smc_pnet_find_ib()
	scsi: pm80xx: Mask and unmask upper interrupt vectors 32-63
	scsi: pm80xx: Enable upper inbound, outbound queues
	scsi: iscsi: Stop queueing during ep_disconnect
	scsi: iscsi: Force immediate failure during shutdown
	scsi: iscsi: Use system_unbound_wq for destroy_work
	scsi: iscsi: Rel ref after iscsi_lookup_endpoint()
	scsi: iscsi: Fix in-kernel conn failure handling
	scsi: iscsi: Move iscsi_ep_disconnect()
	scsi: iscsi: Fix offload conn cleanup when iscsid restarts
	scsi: iscsi: Fix conn cleanup and stop race during iscsid restart
	sctp: Initialize daddr on peeled off socket
	testing/selftests/mqueue: Fix mq_perf_tests to free the allocated cpu set
	perf tools: Fix misleading add event PMU debug message
	nfc: nci: add flush_workqueue to prevent uaf
	cifs: potential buffer overflow in handling symlinks
	dm mpath: only use ktime_get_ns() in historical selector
	net: bcmgenet: Revert "Use stronger register read/writes to assure ordering"
	drm/amd: Add USBC connector ID
	btrfs: fix fallocate to use file_modified to update permissions consistently
	btrfs: do not warn for free space inode in cow_file_range
	drm/amd/display: fix audio format not updated after edid updated
	drm/amd/display: FEC check in timing validation
	drm/amd/display: Update VTEM Infopacket definition
	drm/amdkfd: Fix Incorrect VMIDs passed to HWS
	drm/amdgpu/vcn: improve vcn dpg stop procedure
	drm/amdkfd: Check for potential null return of kmalloc_array()
	Drivers: hv: vmbus: Prevent load re-ordering when reading ring buffer
	scsi: target: tcmu: Fix possible page UAF
	scsi: lpfc: Fix queue failures when recovering from PCI parity error
	scsi: ibmvscsis: Increase INITIAL_SRP_LIMIT to 1024
	net: micrel: fix KS8851_MLL Kconfig
	ata: libata-core: Disable READ LOG DMA EXT for Samsung 840 EVOs
	gpu: ipu-v3: Fix dev_dbg frequency output
	regulator: wm8994: Add an off-on delay for WM8994 variant
	arm64: alternatives: mark patch_alternative() as `noinstr`
	tlb: hugetlb: Add more sizes to tlb_remove_huge_tlb_entry
	net: axienet: setup mdio unconditionally
	net: usb: aqc111: Fix out-of-bounds accesses in RX fixup
	myri10ge: fix an incorrect free for skb in myri10ge_sw_tso
	drm/amd/display: Revert FEC check in validation
	drm/amd/display: Fix allocate_mst_payload assert on resume
	scsi: mvsas: Add PCI ID of RocketRaid 2640
	scsi: megaraid_sas: Target with invalid LUN ID is deleted during scan
	drivers: net: slip: fix NPD bug in sl_tx_timeout()
	perf/imx_ddr: Fix undefined behavior due to shift overflowing the constant
	mm, page_alloc: fix build_zonerefs_node()
	mm: fix unexpected zeroed page mapping with zram swap
	mm: kmemleak: take a full lowmem check in kmemleak_*_phys()
	KVM: x86/mmu: Resolve nx_huge_pages when kvm.ko is loaded
	memory: renesas-rpc-if: fix platform-device leak in error path
	gcc-plugins: latent_entropy: use /dev/urandom
	ath9k: Properly clear TX status area before reporting to mac80211
	ath9k: Fix usage of driver-private space in tx_info
	btrfs: fix root ref counts in error handling in btrfs_get_root_ref
	btrfs: mark resumed async balance as writing
	ALSA: hda/realtek: Add quirk for Clevo PD50PNT
	ALSA: hda/realtek: add quirk for Lenovo Thinkpad X12 speakers
	ALSA: pcm: Test for "silence" field in struct "pcm_format_data"
	nl80211: correctly check NL80211_ATTR_REG_ALPHA2 size
	ipv6: fix panic when forwarding a pkt with no in6 dev
	drm/amd/display: don't ignore alpha property on pre-multiplied mode
	drm/amdgpu: Enable gfxoff quirk on MacBook Pro
	genirq/affinity: Consider that CPUs on nodes can be unbalanced
	tick/nohz: Use WARN_ON_ONCE() to prevent console saturation
	ARM: davinci: da850-evm: Avoid NULL pointer dereference
	dm integrity: fix memory corruption when tag_size is less than digest size
	smp: Fix offline cpu check in flush_smp_call_function_queue()
	i2c: pasemi: Wait for write xfers to finish
	timers: Fix warning condition in __run_timers()
	dma-direct: avoid redundant memory sync for swiotlb
	scsi: iscsi: Fix endpoint reuse regression
	scsi: iscsi: Fix unbound endpoint error handling
	ax25: add refcount in ax25_dev to avoid UAF bugs
	ax25: fix reference count leaks of ax25_dev
	ax25: fix UAF bugs of net_device caused by rebinding operation
	ax25: Fix refcount leaks caused by ax25_cb_del()
	ax25: fix UAF bug in ax25_send_control()
	ax25: fix NPD bug in ax25_disconnect
	ax25: Fix NULL pointer dereferences in ax25 timers
	ax25: Fix UAF bugs in ax25 timers
	Linux 5.10.112

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I9ce7b432f335445dbfb4a67a34a8a1c279011954
commit de64d941a7
Greg Kroah-Hartman, 2022-04-29 09:15:09 +02:00
124 changed files with 1058 additions and 583 deletions

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 111
SUBLEVEL = 112
EXTRAVERSION =
NAME = Dare mighty things

@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
int ret;
u32 val;
struct davinci_soc_info *soc_info = &davinci_soc_info;
u8 rmii_en = soc_info->emac_pdata->rmii_en;
u8 rmii_en;
if (!machine_is_davinci_da850_evm())
return 0;
rmii_en = soc_info->emac_pdata->rmii_en;
cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
val = __raw_readl(cfg_chip3_base);

@@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature)
/*
* Check if the target PC is within an alternative block.
*/
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
return !(pc >= replptr && pc <= (replptr + alt->alt_len));
@@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
u32 insn;
@@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
return insn;
}
static void patch_alternative(struct alt_instr *alt,
static noinstr void patch_alternative(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
__le32 *replptr;

@@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
struct acpi_lpi_state *lpi;
struct acpi_processor *pr = per_cpu(processors, cpu);
if (unlikely(!pr || !pr->flags.has_lpi))
return -EINVAL;
/*
* If the PSCI cpu_suspend function hook has not been initialized
* idle states must not be enabled, so bail out
@@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
if (!psci_ops.cpu_suspend)
return -EOPNOTSUPP;
if (unlikely(!pr || !pr->flags.has_lpi))
return -EINVAL;
count = pr->power.count - 1;
if (count <= 0)
return -ENODEV;

@@ -1340,8 +1340,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
return -ENOTSUPP;
}
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
void kvm_mmu_x86_module_init(void);
int kvm_mmu_vendor_module_init(void);
void kvm_mmu_vendor_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);

@@ -5876,12 +5876,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
return 0;
}
int kvm_mmu_module_init(void)
/*
* nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
* its default value of -1 is technically undefined behavior for a boolean.
*/
void kvm_mmu_x86_module_init(void)
{
int ret = -ENOMEM;
if (nx_huge_pages == -1)
__set_nx_huge_pages(get_nx_auto_mode());
}
/*
* The bulk of the MMU initialization is deferred until the vendor module is
* loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
* to be reset when a potentially different vendor module is loaded.
*/
int kvm_mmu_vendor_module_init(void)
{
int ret = -ENOMEM;
/*
* MMU roles use union aliasing which is, generally speaking, an
@@ -5955,7 +5967,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
mmu_free_memory_caches(vcpu);
}
void kvm_mmu_module_exit(void)
void kvm_mmu_vendor_module_exit(void)
{
mmu_destroy_caches();
percpu_counter_destroy(&kvm_total_used_mmu_pages);

@@ -8005,7 +8005,7 @@ int kvm_arch_init(void *opaque)
goto out_free_x86_emulator_cache;
}
r = kvm_mmu_module_init();
r = kvm_mmu_vendor_module_init();
if (r)
goto out_free_percpu;
@@ -8065,7 +8065,7 @@ void kvm_arch_exit(void)
cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
kvm_mmu_vendor_module_exit();
free_percpu(user_return_msrs);
kmem_cache_destroy(x86_emulator_cache);
kmem_cache_destroy(x86_fpu_cache);
@@ -11426,3 +11426,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
static int __init kvm_x86_init(void)
{
kvm_mmu_x86_module_init();
return 0;
}
module_init(kvm_x86_init);
static void __exit kvm_x86_exit(void)
{
/*
* If module_init() is implemented, module_exit() must also be
* implemented to allow module unload.
*/
}
module_exit(kvm_x86_exit);

@@ -1080,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr,
return 0;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return -EOPNOTSUPP;
}
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
int ret, i;
@@ -1088,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
if (ret == -EOPNOTSUPP)
return ret;
if (!osc_pc_lpi_support_confirmed)
return -EOPNOTSUPP;
@@ -1139,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
return 0;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return -ENODEV;
}
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
return -ENODEV;

@@ -3974,6 +3974,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_NO_DMA_LOG |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |

@@ -204,7 +204,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
rate_cmp_func, NULL);
}
clk->rate_discrete = rate_discrete;

@@ -276,8 +276,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
pin = agpio->pin_table[0];
if (pin <= 255) {
char ev_name[5];
sprintf(ev_name, "_%c%02hhX",
char ev_name[8];
sprintf(ev_name, "_%c%02X",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))

@@ -119,6 +119,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
#define CONNECTOR_OBJECT_ID_USBC 0x17
/* deleted */

@@ -1024,11 +1024,15 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct dma_fence **ef)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct drm_file *drm_priv = filp->private_data;
struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
struct amdgpu_vm *avm = &drv_priv->vm;
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
int ret;
ret = amdgpu_file_to_fpriv(filp, &drv_priv);
if (ret)
return ret;
avm = &drv_priv->vm;
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;

@@ -632,7 +632,7 @@ MODULE_PARM_DESC(sched_policy,
* Maximum number of processes that HWS can schedule concurrently. The maximum is the
* number of VMIDs assigned to the HWS, which is also the default.
*/
int hws_max_conc_proc = 8;
int hws_max_conc_proc = -1;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");

@@ -1248,6 +1248,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
{ 0, 0, 0, 0, 0 },
};

@@ -1429,8 +1429,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
vcn_v3_0_pause_dpg_mode(adev, 0, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

@@ -664,15 +664,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
- kfd->vm_info.first_vmid_kfd + 1;
/* Verify module parameters regarding mapped process number*/
if ((hws_max_conc_proc < 0)
|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
dev_err(kfd_device,
"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
kfd->vm_info.vmid_num_kfd);
if (hws_max_conc_proc >= 0)
kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
else
kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
} else
kfd->max_proc_per_quantum = hws_max_conc_proc;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *

@@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
event_waiters = kmalloc_array(num_events,
sizeof(struct kfd_event_waiter),
GFP_KERNEL);
if (!event_waiters)
return NULL;
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
init_wait(&event_waiters[i].wait);

@@ -2022,7 +2022,8 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
if (aconnector->mst_port)
if (aconnector->dc_link &&
aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);

@@ -1701,8 +1701,8 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
// Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
/*compare audio info*/
if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
return true;

@@ -2387,14 +2387,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
&blnd_cfg.black_color);
}
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
} else if (per_pixel_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
} else {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else

@@ -2270,14 +2270,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx, &blnd_cfg.black_color);
}
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
} else if (per_pixel_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
} else {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else

@@ -100,7 +100,8 @@ enum vsc_packet_revision {
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
#define MASK_VTEM_MD0__RESERVED2 0x0C
#define MASK_VTEM_MD0__QMS_EN 0x04
#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
@@ -109,7 +110,7 @@ enum vsc_packet_revision {
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
#define MASK_VTEM_MD2__RESERVED3 0xF8
#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF

@@ -1222,7 +1222,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
"gpu", 0x100000000ULL, 0x1ffffffffULL);
"gpu", 0x100000000ULL, SZ_4G);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)

@@ -644,7 +644,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
return connector;
fail:
connector->funcs->destroy(msm_dsi->connector);
connector->funcs->destroy(connector);
return ERR_PTR(ret);
}

@@ -849,6 +849,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}

@@ -451,8 +451,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
error = rate / (sig->mode.pixelclock / 1000);
dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
rate, div, (signed)(error - 1000) / 10, error % 10);
dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
rate, div, error < 1000 ? '-' : '+',
abs(error - 1000) / 10, abs(error - 1000) % 10);
/* Allow a 1% error */
if (error < 1010 && error >= 990) {

@@ -378,7 +378,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
u32 priv_read_loc = rbi->priv_read_index;
u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
u32 write_loc;
/*
* The Hyper-V host writes the packet data, then uses
* store_release() to update the write_index. Use load_acquire()
* here to prevent loads of the packet data from being re-ordered
* before the read of the write_index and potentially getting
* stale data.
*/
write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
if (write_loc >= priv_read_loc)
return write_loc - priv_read_loc;

@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
TXFIFO_WR(smbus, msg->buf[msg->len-1] |
(stop ? MTXFIFO_STOP : 0));
if (stop) {
err = pasemi_smb_waitready(smbus);
if (err)
goto reset_out;
}
}
return 0;

@@ -499,6 +499,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
iser_conn->iscsi_conn = conn;
out:
iscsi_put_endpoint(ep);
mutex_unlock(&iser_conn->state_mutex);
return error;
}
@@ -988,6 +989,7 @@ static struct iscsi_transport iscsi_iser_transport = {
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,

@@ -429,7 +429,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
u64 time_now = sched_clock();
u64 time_now = ktime_get_ns();
struct dm_path *ret = NULL;
unsigned long flags;
@@ -470,7 +470,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
static u64 path_service_time(struct path_info *pi, u64 start_time)
{
u64 sched_now = ktime_get_ns();
u64 now = ktime_get_ns();
/* if a previous disk request has finished after this IO was
* sent to the hardware, pretend the submission happened
@@ -479,11 +479,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
if (time_after64(pi->last_finish, start_time))
start_time = pi->last_finish;
pi->last_finish = sched_now;
if (time_before64(sched_now, start_time))
pi->last_finish = now;
if (time_before64(now, start_time))
return 0;
return sched_now - start_time;
return now - start_time;
}
static int hst_end_io(struct path_selector *ps, struct dm_path *path,

@@ -4232,6 +4232,7 @@ try_smaller_buffer:
}
if (ic->internal_hash) {
size_t recalc_tags_size;
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@@ -4245,8 +4246,10 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;

@@ -895,7 +895,7 @@ static int rga_probe(struct platform_device *pdev)
}
rga->dst_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
if (rga->dst_mmu_pages) {
if (!rga->dst_mmu_pages) {
ret = -ENOMEM;
goto free_src_pages;
}

@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
ebi->smc.regmap = syscon_node_to_regmap(smc_np);
if (IS_ERR(ebi->smc.regmap))
return PTR_ERR(ebi->smc.regmap);
if (IS_ERR(ebi->smc.regmap)) {
ret = PTR_ERR(ebi->smc.regmap);
goto put_node;
}
ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
if (IS_ERR(ebi->smc.layout))
return PTR_ERR(ebi->smc.layout);
if (IS_ERR(ebi->smc.layout)) {
ret = PTR_ERR(ebi->smc.layout);
goto put_node;
}
ebi->smc.clk = of_clk_get(smc_np, 0);
if (IS_ERR(ebi->smc.clk)) {
if (PTR_ERR(ebi->smc.clk) != -ENOENT)
return PTR_ERR(ebi->smc.clk);
if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
ret = PTR_ERR(ebi->smc.clk);
goto put_node;
}
ebi->smc.clk = NULL;
}
of_node_put(smc_np);
ret = clk_prepare_enable(ebi->smc.clk);
if (ret)
return ret;
@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
}
return of_platform_populate(np, NULL, NULL, dev);
put_node:
of_node_put(smc_np);
return ret;
}
static __maybe_unused int atmel_ebi_resume(struct device *dev)

@@ -592,6 +592,7 @@ static int rpcif_probe(struct platform_device *pdev)
struct platform_device *vdev;
struct device_node *flash;
const char *name;
int ret;
flash = of_get_next_child(pdev->dev.of_node, NULL);
if (!flash) {
@@ -615,7 +616,14 @@ static int rpcif_probe(struct platform_device *pdev)
return -ENOMEM;
vdev->dev.parent = &pdev->dev;
platform_set_drvdata(pdev, vdev);
return platform_device_add(vdev);
ret = platform_device_add(vdev);
if (ret) {
platform_device_put(vdev);
return ret;
}
return 0;
}
static int rpcif_remove(struct platform_device *pdev)

@@ -1466,7 +1466,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
err = dsa_register_switch(ds);
if (err) {
dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}

@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(value, offset);
else
writel(value, offset);
writel_relaxed(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(offset);
else
return readl(offset);
return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,

@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
return 0;
errout:
mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
return err;

@@ -37,6 +37,7 @@ config KS8851
config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
select MII
select CRC32
select EEPROM_93CX6

@@ -2895,11 +2895,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
if (segs != NULL) {
curr = segs;
segs = next;
skb_list_walk_safe(next, curr, next) {
curr->next = NULL;
dev_kfree_skb_any(segs);
dev_kfree_skb_any(curr);
}
goto drop;
}

@@ -57,10 +57,6 @@
#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define TSE_PCS_IF_USE_SGMII 0x03
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
#define SGMII_ADAPTER_ENABLE 0x0000
#define AUTONEGO_LINK_TIMER 20
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
unsigned int speed)
{
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
u32 val;
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
pcs->autoneg = phy_dev->autoneg;
if (phy_dev->autoneg == AUTONEG_ENABLE) {

@@ -10,6 +10,10 @@
#include <linux/phy.h>
#include <linux/timer.h>
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_ENABLE 0x0000
#define SGMII_ADAPTER_DISABLE 0x0001
struct tse_pcs {
struct device *dev;
void __iomem *tse_pcs_base;

@@ -18,9 +18,6 @@
#include "altr_tse_pcs.h"
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
struct device *dev = dwmac->dev;
struct net_device *ndev = dev_get_drvdata(dev);
struct phy_device *phy_dev = ndev->phydev;
u32 val;
if ((tse_pcs_base) && (sgmii_adapter_base))
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
if (splitter_base) {
val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
@@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
if (tse_pcs_base && sgmii_adapter_base)
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
if (phy_dev)
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
}

@@ -2060,15 +2060,14 @@ static int axienet_probe(struct platform_device *pdev)
if (ret)
goto cleanup_clk;
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
ret = axienet_mdio_setup(lp);
if (ret)
dev_warn(&pdev->dev,
"error registering MDIO bus: %d\n", ret);
}
ret = axienet_mdio_setup(lp);
if (ret)
dev_warn(&pdev->dev,
"error registering MDIO bus: %d\n", ret);
if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!lp->phy_node) {
dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
ret = -EINVAL;

@@ -311,7 +311,6 @@ static void sp_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
dev->needs_free_netdev = true;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
@@ -679,9 +678,11 @@ static void sixpack_close(struct tty_struct *tty)
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers. */
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
free_netdev(sp->dev);
}
/* Perform I/O control on an active 6pack channel. */

@@ -5,20 +5,18 @@
* Copyright (C) 2014-2017 Broadcom
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
#define MDIO_CMD 0x00
#define MDIO_START_BUSY (1 << 29)

@@ -14,10 +14,10 @@
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
#include <linux/module.h>
#include <linux/mdio-bitbang.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/mdio-bitbang.h>
#include <linux/module.h>
#include <linux/types.h>
#define MDIO_READ 2
#define MDIO_WRITE 1

@@ -4,9 +4,9 @@
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/io.h>
#include "mdio-cavium.h"

@@ -17,15 +17,15 @@
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/mdio-gpio.h>
#include <linux/mdio-bitbang.h>
#include <linux/mdio-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mdio-gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;

@@ -3,10 +3,10 @@
/* Copyright (c) 2020 Sartura Ltd. */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

@@ -7,12 +7,12 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
/* MII address register definitions */
#define MII_ADDR_REG_ADDR 0x10

@@ -6,14 +6,14 @@
* Copyright (c) 2017 Microsemi Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#define MSCC_MIIM_REG_STATUS 0x0
#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2)

@@ -3,14 +3,14 @@
* Copyright 2016 Broadcom
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
#define MDIO_RATE_ADJ_INT_OFFSET 0x004

@@ -3,13 +3,13 @@
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
#include <linux/gpio/consumer.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"

@@ -7,13 +7,13 @@
* Copyright 2012 Freescale Semiconductor, Inc.
*/
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
#include <linux/platform_device.h>
struct mdio_mux_mmioreg_state {
void *mux_handle;

@@ -4,10 +4,10 @@
* Copyright 2019 NXP
*/
#include <linux/platform_device.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/platform_device.h>
struct mdio_mux_multiplexer_state {
struct mux_control *muxc;

@@ -3,12 +3,12 @@
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
#include <linux/platform_device.h>
#include <linux/mdio-mux.h>
#include <linux/of_mdio.h>
#include <linux/device.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#define DRV_DESCRIPTION "MDIO bus multiplexer driver"

@@ -3,13 +3,13 @@
* Copyright (C) 2009-2015 Cavium, Inc.
*/
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include "mdio-cavium.h"

@@ -3,14 +3,14 @@
* Copyright (C) 2009-2016 Cavium, Inc.
*/
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/phy.h>
#include "mdio-cavium.h"

@@ -13,11 +13,11 @@
#include <linux/io.h>
#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/prefetch.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/prefetch.h>
#include <net/ip.h>
static bool xgene_mdio_status;

@@ -8,17 +8,17 @@
* out of the OpenFirmware device tree and using it to populate an mii_bus.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/err.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */

@@ -468,7 +468,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
if (!netif_running(dev))
if (!netif_running(dev) || !sl->tty)
goto out;
/* May be we must check transmitter timeout here ?

@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (start_of_descs != desc_offset)
goto err;
/* self check desc_offset from header*/
if (desc_offset >= skb_len)
/* self check desc_offset from header and make sure that the
* bounds of the metadata array are inside the SKB
*/
if (pkt_count * 2 + desc_offset >= skb_len)
goto err;
/* Packets must not overlap the metadata array */
skb_trim(skb, desc_offset);
if (pkt_count == 0)
goto err;

@@ -292,7 +292,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
if (unlikely(!rcv)) {
if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
kfree_skb(skb);
goto drop;
}

@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
if (fi->keyix == keyix)
return true;
}

@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
sizeof(tx_info->rate_driver_data));
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
sizeof(tx_info->status.status_driver_data));
return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2501,6 +2501,16 @@ skip_tx_complete:
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
{
void *ptr = &tx_info->status;
memset(ptr + sizeof(tx_info->status.rates), 0,
sizeof(tx_info->status) -
sizeof(tx_info->status.rates) -
sizeof(tx_info->status.status_driver_data));
}
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok)
@@ -2512,6 +2522,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
ath_clear_tx_status(tx_info);
if (txok)
tx_info->status.ack_signal = ts->ts_rssi;
@@ -2526,6 +2538,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.ampdu_len = nframes;
tx_info->status.ampdu_ack_len = nframes - nbad;
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
tx_info->status.rates[i].count = 0;
tx_info->status.rates[i].idx = -1;
}
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
@@ -2547,16 +2566,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
}
for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
tx_info->status.rates[i].count = 0;
tx_info->status.rates[i].idx = -1;
}
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
/* we report airtime in ath_tx_count_airtime(), don't report twice */
tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)

@@ -29,7 +29,7 @@
#define CNTL_OVER_MASK 0xFFFFFFFE
#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0

@@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = {
};
static const struct regulator_desc wm8994_ldo_desc[] = {
{
.name = "LDO1",
.id = 1,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
.vsel_reg = WM8994_LDO_1,
.vsel_mask = WM8994_LDO1_VSEL_MASK,
.ops = &wm8994_ldo1_ops,
.min_uV = 2400000,
.uV_step = 100000,
.enable_time = 3000,
.off_on_delay = 36000,
.owner = THIS_MODULE,
},
{
.name = "LDO2",
.id = 2,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
.vsel_reg = WM8994_LDO_2,
.vsel_mask = WM8994_LDO2_VSEL_MASK,
.ops = &wm8994_ldo2_ops,
.enable_time = 3000,
.off_on_delay = 36000,
.owner = THIS_MODULE,
},
};
static const struct regulator_desc wm8958_ldo_desc[] = {
{
.name = "LDO1",
.id = 1,
@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
* regulator core and we need not worry about it on the
* error path.
*/
ldo->regulator = devm_regulator_register(&pdev->dev,
&wm8994_ldo_desc[id],
&config);
if (ldo->wm8994->type == WM8994) {
ldo->regulator = devm_regulator_register(&pdev->dev,
&wm8994_ldo_desc[id],
&config);
} else {
ldo->regulator = devm_regulator_register(&pdev->dev,
&wm8958_ldo_desc[id],
&config);
}
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",

@@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
uint16_t cri_index;
int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep = ep->dd_data;
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
rc = -EINVAL;
goto put_ep;
}
if (beiscsi_ep->phba != phba) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);
return -EEXIST;
rc = -EEXIST;
goto put_ep;
}
cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
if (phba->conn_table[cri_index]) {
@@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep->ep_cid,
beiscsi_conn,
phba->conn_table[cri_index]);
return -EINVAL;
rc = -EINVAL;
goto put_ep;
}
}
@@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
"BS_%d : cid %d phba->conn_table[%u]=%p\n",
beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
phba->conn_table[cri_index] = beiscsi_conn;
return 0;
put_ep:
iscsi_put_endpoint(ep);
return rc;
}
static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)

@@ -5810,6 +5810,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.destroy_session = beiscsi_session_destroy,
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = beiscsi_attr_is_visible,
.set_iface_param = beiscsi_iface_set_param,

@@ -1422,17 +1422,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
* Forcefully terminate all in progress connection recovery at the
* earliest, either in bind(), send_pdu(LOGIN), or conn_start()
*/
if (bnx2i_adapter_ready(hba))
return -EIO;
if (bnx2i_adapter_ready(hba)) {
ret_code = -EIO;
goto put_ep;
}
bnx2i_ep = ep->dd_data;
if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
(bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
(bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
/* Peer disconnect via' FIN or RST */
return -EINVAL;
ret_code = -EINVAL;
goto put_ep;
}
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
ret_code = -EINVAL;
goto put_ep;
}
if (bnx2i_ep->hba != hba) {
/* Error - TCP connection does not belong to this device
@@ -1443,7 +1449,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
"belong to hba (%s)\n",
hba->netdev->name);
return -EEXIST;
ret_code = -EEXIST;
goto put_ep;
}
bnx2i_ep->conn = bnx2i_conn;
bnx2i_conn->ep = bnx2i_ep;
@@ -1460,6 +1467,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
bnx2i_put_rq_buf(bnx2i_conn, 0);
bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
put_ep:
iscsi_put_endpoint(ep);
return ret_code;
}
@@ -2278,6 +2287,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.destroy_session = bnx2i_session_destroy,
.create_conn = bnx2i_conn_create,
.bind_conn = bnx2i_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = bnx2i_conn_destroy,
.attr_is_visible = bnx2i_attr_is_visible,
.set_param = iscsi_set_param,

@@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,

@@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,

@@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
ppm->tformat.pgsz_idx_dflt);
if (err < 0)
return err;
goto put_ep;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
return -EINVAL;
if (err) {
err = -EINVAL;
goto put_ep;
}
/* calculate the tag idx bits needed for this conn based on cmds_max */
cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
@@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
/* init recv engine */
iscsi_tcp_hdr_recv_prep(tcp_conn);
return 0;
put_ep:
iscsi_put_endpoint(ep);
return err;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);

@@ -35,7 +35,7 @@
#define IBMVSCSIS_VERSION "v0.2"
#define INITIAL_SRP_LIMIT 800
#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024

@@ -1367,23 +1367,32 @@ void iscsi_session_failure(struct iscsi_session *session,
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
spin_lock_bh(&session->frwd_lock);
if (session->state == ISCSI_STATE_FAILED) {
spin_unlock_bh(&session->frwd_lock);
return;
}
if (session->state == ISCSI_STATE_FAILED)
return false;
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
spin_unlock_bh(&session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
iscsi_conn_error_event(conn->cls_conn, err);
return true;
}
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
struct iscsi_session *session = conn->session;
bool needs_evt;
spin_lock_bh(&session->frwd_lock);
needs_evt = iscsi_set_conn_failed(conn);
spin_unlock_bh(&session->frwd_lock);
if (needs_evt)
iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@@ -2117,6 +2126,51 @@ done:
spin_unlock(&session->frwd_lock);
}
/**
* iscsi_conn_unbind - prevent queueing to conn.
* @cls_conn: iscsi conn ep is bound to.
* @is_active: is the conn in use for boot or is this for EH/termination
*
* This must be called by drivers implementing the ep_disconnect callout.
* It disables queueing to the connection from libiscsi in preparation for
* an ep_disconnect call.
*/
void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
{
struct iscsi_session *session;
struct iscsi_conn *conn;
if (!cls_conn)
return;
conn = cls_conn->dd_data;
session = conn->session;
/*
* Wait for iscsi_eh calls to exit. We don't wait for the tmf to
* complete or timeout. The caller just wants to know what's running
* is everything that needs to be cleaned up, and no cmds will be
* queued.
*/
mutex_lock(&session->eh_mutex);
iscsi_suspend_queue(conn);
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
if (!is_active) {
/*
* if logout timed out before userspace could even send a PDU
* the state might still be in ISCSI_STATE_LOGGED_IN and
* allowing new cmds and TMFs.
*/
if (session->state == ISCSI_STATE_LOGGED_IN)
iscsi_set_conn_failed(conn);
}
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
struct iscsi_tm *hdr)
{

@@ -13614,6 +13614,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
/* Init cpu_map array */
lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {

@@ -2554,6 +2554,9 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
#define MEGASAS_IS_LUN_VALID(sdev) \
(((sdev)->lun == 0) ? 1 : 0)
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)

@@ -2111,6 +2111,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
goto scan_target;
}
return -ENXIO;
} else if (!MEGASAS_IS_LUN_VALID(sdev)) {
sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
return -ENXIO;
}
scan_target:
@@ -2141,6 +2144,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
instance = megasas_lookup_instance(sdev->host->host_no);
if (MEGASAS_IS_LOGICAL(sdev)) {
if (!MEGASAS_IS_LUN_VALID(sdev)) {
sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
return;
}
ld_tgt_id = MEGASAS_TARGET_ID(sdev);
instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
if (megasas_dbg_lvl & LD_PD_DEBUG)

@@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },

@@ -765,6 +765,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
/* Enable higher IQs and OQs, 32 to 63, bit 16 */
if (pm8001_ha->max_q_num > 32)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1024,6 +1028,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (0x0000 != gst_len_mpistate)
return -EBUSY;
/*
* As per controller datasheet, after successful MPI
* initialization minimum 500ms delay is required before
* issuing commands.
*/
msleep(500);
return 0;
}
@@ -1682,10 +1693,11 @@ static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
u32 mask;
mask = (u32)(1 << vec);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
if (vec < 32)
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
else
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1701,12 +1713,15 @@ static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
u32 mask;
if (vec == 0xFF)
mask = 0xFFFFFFFF;
if (vec == 0xFF) {
/* disable all vectors 0-31, 32-63 */
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
} else if (vec < 32)
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
mask = (u32)(1 << vec);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);

@@ -387,6 +387,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
struct qedi_ctx *qedi = iscsi_host_priv(shost);
struct qedi_endpoint *qedi_ep;
struct iscsi_endpoint *ep;
int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -394,11 +395,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_ep = ep->dd_data;
if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
(qedi_ep->state == EP_STATE_TCP_RST_RCVD))
return -EINVAL;
(qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
rc = -EINVAL;
goto put_ep;
}
if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
rc = -EINVAL;
goto put_ep;
}
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
@@ -408,13 +414,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_conn->cmd_cleanup_req = 0;
qedi_conn->cmd_cleanup_cmpl = 0;
if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
return -EINVAL;
if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
rc = -EINVAL;
goto put_ep;
}
spin_lock_init(&qedi_conn->tmf_work_lock);
INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
init_waitqueue_head(&qedi_conn->wait_queue);
return 0;
put_ep:
iscsi_put_endpoint(ep);
return rc;
}
static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
@@ -1428,6 +1439,7 @@ struct iscsi_transport qedi_iscsi_transport = {
.destroy_session = qedi_session_destroy,
.create_conn = qedi_conn_create,
.bind_conn = qedi_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.start_conn = qedi_conn_start,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qedi_conn_destroy,

@@ -259,6 +259,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.start_conn = qla4xxx_conn_start,
.create_conn = qla4xxx_conn_create,
.bind_conn = qla4xxx_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qla4xxx_conn_destroy,
.set_param = iscsi_set_param,
@@ -3237,6 +3238,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
iscsi_put_endpoint(ep);
return 0;
}

@@ -86,16 +86,10 @@ struct iscsi_internal {
struct transport_container session_cont;
};
/* Worker to perform connection failure on unresponsive connections
* completely in kernel space.
*/
static void stop_conn_work_fn(struct work_struct *work);
static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_eh_timer_workq;
-static struct workqueue_struct *iscsi_destroy_workq;
+static struct workqueue_struct *iscsi_conn_cleanup_workq;
static DEFINE_IDA(iscsi_sess_ida);
/*
@@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
}
EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
void iscsi_put_endpoint(struct iscsi_endpoint *ep)
{
put_device(&ep->dev);
}
EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
/**
* iscsi_lookup_endpoint - get ep from handle
* @handle: endpoint handle
*
* Caller must do a iscsi_put_endpoint.
*/
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
-	struct iscsi_endpoint *ep;
struct device *dev;
dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
@@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
if (!dev)
return NULL;
-	ep = iscsi_dev_to_endpoint(dev);
-	/*
-	 * we can drop this now because the interface will prevent
-	 * removals and lookups from racing.
-	 */
-	put_device(dev);
-	return ep;
+	return iscsi_dev_to_endpoint(dev);
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
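
With the hunk above, iscsi_lookup_endpoint() now returns with the underlying device reference still held instead of dropping it inside the lookup, closing the window in which the endpoint could be freed while a caller was still using it. A usage sketch of the resulting contract (hypothetical caller, not a standalone program):

static int example_use_endpoint(u64 ep_handle)
{
	struct iscsi_endpoint *ep;

	ep = iscsi_lookup_endpoint(ep_handle);	/* returns with a reference */
	if (!ep)
		return -EINVAL;
	/* ... ep is pinned here; a concurrent destroy cannot free it ... */
	iscsi_put_endpoint(ep);			/* pairs with the lookup */
	return 0;
}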
@@ -1598,12 +1597,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
-/*
- * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
- * against the kernel stop_connection recovery mechanism
- */
-static DEFINE_MUTEX(conn_mutex);
static LIST_HEAD(sesslist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
@@ -2225,6 +2218,155 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
}
EXPORT_SYMBOL_GPL(iscsi_remove_session);
static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
{
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");
switch (flag) {
case STOP_CONN_RECOVER:
WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
break;
case STOP_CONN_TERM:
WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
break;
default:
iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
flag);
return;
}
conn->transport->stop_conn(conn, flag);
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}
static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
{
struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
struct iscsi_endpoint *ep;
ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
if (!conn->ep || !session->transport->ep_disconnect)
return;
ep = conn->ep;
conn->ep = NULL;
session->transport->unbind_conn(conn, is_active);
session->transport->ep_disconnect(ep);
ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
}
static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
struct iscsi_endpoint *ep,
bool is_active)
{
/* Check if this was a conn error and the kernel took ownership */
spin_lock_irq(&conn->lock);
if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
spin_unlock_irq(&conn->lock);
iscsi_ep_disconnect(conn, is_active);
} else {
spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
mutex_unlock(&conn->ep_mutex);
flush_work(&conn->cleanup_work);
/*
* Userspace is now done with the EP so we can release the ref
* iscsi_cleanup_conn_work_fn took.
*/
iscsi_put_endpoint(ep);
mutex_lock(&conn->ep_mutex);
}
}
static int iscsi_if_stop_conn(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
int flag = ev->u.stop_conn.flag;
struct iscsi_cls_conn *conn;
conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
if (!conn)
return -EINVAL;
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
/*
* If this is a termination we have to call stop_conn with that flag
* so the correct states get set. If we haven't run the work yet try to
* avoid the extra run.
*/
if (flag == STOP_CONN_TERM) {
cancel_work_sync(&conn->cleanup_work);
iscsi_stop_conn(conn, flag);
} else {
/*
* For offload, when iscsid is restarted it won't know about
* existing endpoints so it can't do a ep_disconnect. We clean
* it up here for userspace.
*/
mutex_lock(&conn->ep_mutex);
if (conn->ep)
iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
mutex_unlock(&conn->ep_mutex);
/*
* Figure out if it was the kernel or userspace initiating this.
*/
spin_lock_irq(&conn->lock);
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
spin_unlock_irq(&conn->lock);
iscsi_stop_conn(conn, flag);
} else {
spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn,
"flush kernel conn cleanup.\n");
flush_work(&conn->cleanup_work);
}
/*
* Only clear for recovery to avoid extra cleanup runs during
* termination.
*/
spin_lock_irq(&conn->lock);
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
spin_unlock_irq(&conn->lock);
}
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
return 0;
}
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
cleanup_work);
struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
mutex_lock(&conn->ep_mutex);
/*
* Get a ref to the ep, so we don't release its ID until after
* userspace is done referencing it in iscsi_if_disconnect_bound_ep.
*/
if (conn->ep)
get_device(&conn->ep->dev);
iscsi_ep_disconnect(conn, false);
if (system_state != SYSTEM_RUNNING) {
/*
* If the user has set up for the session to never timeout
* then hang like they wanted. For all other cases fail right
* away since userspace is not going to relogin.
*/
if (session->recovery_tmo > 0)
session->recovery_tmo = 0;
}
iscsi_stop_conn(conn, STOP_CONN_RECOVER);
mutex_unlock(&conn->ep_mutex);
ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
}
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -2263,11 +2405,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
-	INIT_LIST_HEAD(&conn->conn_list_err);
+	INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
-	conn->state = ISCSI_CONN_DOWN;
+	WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2321,7 +2464,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
spin_lock_irqsave(&connlock, flags);
list_del(&conn->conn_list);
-	list_del(&conn->conn_list_err);
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
@@ -2448,77 +2590,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
-/*
- * This can be called without the rx_queue_mutex, if invoked by the kernel
- * stop work. But, in that case, it is guaranteed not to race with
- * iscsi_destroy by conn_mutex.
- */
-static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
-{
-	/*
-	 * It is important that this path doesn't rely on
-	 * rx_queue_mutex, otherwise, a thread doing allocation on a
-	 * start_session/start_connection could sleep waiting on a
-	 * writeback to a failed iscsi device, that cannot be recovered
-	 * because the lock is held. If we don't hold it here, the
-	 * kernel stop_conn_work_fn has a chance to stop the broken
-	 * session and resolve the allocation.
-	 *
-	 * Still, the user invoked .stop_conn() needs to be serialized
-	 * with stop_conn_work_fn by a private mutex. Not pretty, but
-	 * it works.
-	 */
-	mutex_lock(&conn_mutex);
-	switch (flag) {
-	case STOP_CONN_RECOVER:
-		conn->state = ISCSI_CONN_FAILED;
-		break;
-	case STOP_CONN_TERM:
-		conn->state = ISCSI_CONN_DOWN;
-		break;
-	default:
-		iscsi_cls_conn_printk(KERN_ERR, conn,
-				      "invalid stop flag %d\n", flag);
-		goto unlock;
-	}
-	conn->transport->stop_conn(conn, flag);
-unlock:
-	mutex_unlock(&conn_mutex);
-}
-static void stop_conn_work_fn(struct work_struct *work)
-{
-	struct iscsi_cls_conn *conn, *tmp;
-	unsigned long flags;
-	LIST_HEAD(recovery_list);
-	spin_lock_irqsave(&connlock, flags);
-	if (list_empty(&connlist_err)) {
-		spin_unlock_irqrestore(&connlock, flags);
-		return;
-	}
-	list_splice_init(&connlist_err, &recovery_list);
-	spin_unlock_irqrestore(&connlock, flags);
-	list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
-		uint32_t sid = iscsi_conn_get_sid(conn);
-		struct iscsi_cls_session *session;
-		session = iscsi_session_lookup(sid);
-		if (session) {
-			if (system_state != SYSTEM_RUNNING) {
-				session->recovery_tmo = 0;
-				iscsi_if_stop_conn(conn, STOP_CONN_TERM);
-			} else {
-				iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
-			}
-		}
-		list_del_init(&conn->conn_list_err);
-	}
-}
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
@@ -2527,11 +2598,31 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
unsigned long flags;
+	int state;
-	spin_lock_irqsave(&connlock, flags);
-	list_add(&conn->conn_list_err, &connlist_err);
-	spin_unlock_irqrestore(&connlock, flags);
-	queue_work(system_unbound_wq, &stop_conn_work);
spin_lock_irqsave(&conn->lock, flags);
/*
* Userspace will only do a stop call if we are at least bound. And, we
* only need to do the in kernel cleanup if in the UP state so cmds can
* be released to upper layers. If in other states just wait for
* userspace to avoid races that can leave the cleanup_work queued.
*/
state = READ_ONCE(conn->state);
switch (state) {
case ISCSI_CONN_BOUND:
case ISCSI_CONN_UP:
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
&conn->flags)) {
queue_work(iscsi_conn_cleanup_workq,
&conn->cleanup_work);
}
break;
default:
ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
state);
break;
}
spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2861,26 +2952,17 @@ static int
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
struct iscsi_cls_conn *conn;
-	unsigned long flags;
conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
if (!conn)
return -EINVAL;
-	spin_lock_irqsave(&connlock, flags);
-	if (!list_empty(&conn->conn_list_err)) {
-		spin_unlock_irqrestore(&connlock, flags);
-		return -EAGAIN;
-	}
-	spin_unlock_irqrestore(&connlock, flags);
+	ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n");
+	flush_work(&conn->cleanup_work);
 	ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
-	mutex_lock(&conn_mutex);
 	if (transport->destroy_conn)
 		transport->destroy_conn(conn);
-	mutex_unlock(&conn_mutex);
return 0;
}
@@ -2890,7 +2972,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
-	int err = 0, value = 0;
+	int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2907,8 +2989,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
-		if ((conn->state == ISCSI_CONN_BOUND) ||
-		    (conn->state == ISCSI_CONN_UP)) {
+		state = READ_ONCE(conn->state);
+		if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -2968,15 +3050,22 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
ep = iscsi_lookup_endpoint(ep_handle);
if (!ep)
return -EINVAL;
conn = ep->conn;
-	if (conn) {
-		mutex_lock(&conn->ep_mutex);
-		conn->ep = NULL;
-		mutex_unlock(&conn->ep_mutex);
-		conn->state = ISCSI_CONN_FAILED;
+	if (!conn) {
+		/*
+		 * conn was not even bound yet, so we can't get iscsi conn
+		 * failures yet.
+		 */
+		transport->ep_disconnect(ep);
+		goto put_ep;
 	}
-	transport->ep_disconnect(ep);
+	mutex_lock(&conn->ep_mutex);
+	iscsi_if_disconnect_bound_ep(conn, ep, false);
+	mutex_unlock(&conn->ep_mutex);
+put_ep:
+	iscsi_put_endpoint(ep);
return 0;
}
@@ -3002,6 +3091,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->r.retcode = transport->ep_poll(ep,
ev->u.ep_poll.timeout_ms);
iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
rc = iscsi_if_ep_disconnect(transport,
@@ -3632,18 +3722,123 @@ exit_host_stats:
return err;
}
static int iscsi_if_transport_conn(struct iscsi_transport *transport,
struct nlmsghdr *nlh)
{
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_cls_session *session;
struct iscsi_cls_conn *conn = NULL;
struct iscsi_endpoint *ep;
uint32_t pdu_len;
int err = 0;
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_CONN:
return iscsi_if_create_conn(transport, ev);
case ISCSI_UEVENT_DESTROY_CONN:
return iscsi_if_destroy_conn(transport, ev);
case ISCSI_UEVENT_STOP_CONN:
return iscsi_if_stop_conn(transport, ev);
}
/*
* The following cmds need to be run under the ep_mutex so in kernel
* conn cleanup (ep_disconnect + unbind and conn) is not done while
* these are running. They also must not run if we have just run a conn
* cleanup because they would set the state in a way that might allow
* IO or send IO themselves.
*/
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_START_CONN:
conn = iscsi_conn_lookup(ev->u.start_conn.sid,
ev->u.start_conn.cid);
break;
case ISCSI_UEVENT_BIND_CONN:
conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
break;
case ISCSI_UEVENT_SEND_PDU:
conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
break;
}
if (!conn)
return -EINVAL;
mutex_lock(&conn->ep_mutex);
spin_lock_irq(&conn->lock);
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
spin_unlock_irq(&conn->lock);
mutex_unlock(&conn->ep_mutex);
ev->r.retcode = -ENOTCONN;
return 0;
}
spin_unlock_irq(&conn->lock);
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_BIND_CONN:
session = iscsi_session_lookup(ev->u.b_conn.sid);
if (!session) {
err = -EINVAL;
break;
}
ev->r.retcode = transport->bind_conn(session, conn,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
if (!ev->r.retcode)
WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
if (ev->r.retcode || !transport->ep_connect)
break;
ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
if (ep) {
ep->conn = conn;
conn->ep = ep;
iscsi_put_endpoint(ep);
} else {
err = -ENOTCONN;
iscsi_cls_conn_printk(KERN_ERR, conn,
"Could not set ep conn binding\n");
}
break;
case ISCSI_UEVENT_START_CONN:
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
WRITE_ONCE(conn->state, ISCSI_CONN_UP);
break;
case ISCSI_UEVENT_SEND_PDU:
pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
if ((ev->u.send_pdu.hdr_size > pdu_len) ||
(ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
err = -EINVAL;
break;
}
ev->r.retcode = transport->send_pdu(conn,
(struct iscsi_hdr *)((char *)ev + sizeof(*ev)),
(char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
ev->u.send_pdu.data_size);
break;
default:
err = -ENOSYS;
}
mutex_unlock(&conn->ep_mutex);
return err;
}
static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
u32 portid;
-	u32 pdu_len;
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
-	struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep = NULL;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -3684,6 +3879,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -3708,7 +3904,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
-			queue_work(iscsi_destroy_workq, &session->destroy_work);
+			queue_work(system_unbound_wq, &session->destroy_work);
}
break;
case ISCSI_UEVENT_UNBIND_SESSION:
@@ -3719,89 +3915,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
else
err = -EINVAL;
break;
-	case ISCSI_UEVENT_CREATE_CONN:
-		err = iscsi_if_create_conn(transport, ev);
-		break;
-	case ISCSI_UEVENT_DESTROY_CONN:
-		err = iscsi_if_destroy_conn(transport, ev);
-		break;
-	case ISCSI_UEVENT_BIND_CONN:
-		session = iscsi_session_lookup(ev->u.b_conn.sid);
-		conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
-		if (conn && conn->ep)
-			iscsi_if_ep_disconnect(transport, conn->ep->id);
-		if (!session || !conn) {
-			err = -EINVAL;
-			break;
-		}
-		mutex_lock(&conn_mutex);
-		ev->r.retcode =	transport->bind_conn(session, conn,
-						ev->u.b_conn.transport_eph,
-						ev->u.b_conn.is_leading);
-		if (!ev->r.retcode)
-			conn->state = ISCSI_CONN_BOUND;
-		mutex_unlock(&conn_mutex);
-		if (ev->r.retcode || !transport->ep_connect)
-			break;
-		ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
-		if (ep) {
-			ep->conn = conn;
-			mutex_lock(&conn->ep_mutex);
-			conn->ep = ep;
-			mutex_unlock(&conn->ep_mutex);
-		} else
-			iscsi_cls_conn_printk(KERN_ERR, conn,
-					      "Could not set ep conn "
-					      "binding\n");
-		break;
 	case ISCSI_UEVENT_SET_PARAM:
 		err = iscsi_set_param(transport, ev);
 		break;
-	case ISCSI_UEVENT_START_CONN:
-		conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
-		if (conn) {
-			mutex_lock(&conn_mutex);
-			ev->r.retcode = transport->start_conn(conn);
-			if (!ev->r.retcode)
-				conn->state = ISCSI_CONN_UP;
-			mutex_unlock(&conn_mutex);
-		}
-		else
-			err = -EINVAL;
-		break;
+	case ISCSI_UEVENT_CREATE_CONN:
+	case ISCSI_UEVENT_DESTROY_CONN:
 	case ISCSI_UEVENT_STOP_CONN:
-		conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
-		if (conn)
-			iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
-		else
-			err = -EINVAL;
-		break;
+	case ISCSI_UEVENT_START_CONN:
+	case ISCSI_UEVENT_BIND_CONN:
 	case ISCSI_UEVENT_SEND_PDU:
-		pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
-		if ((ev->u.send_pdu.hdr_size > pdu_len) ||
-		    (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
-			err = -EINVAL;
-			break;
-		}
-		conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
-		if (conn) {
-			mutex_lock(&conn_mutex);
-			ev->r.retcode =	transport->send_pdu(conn,
-				(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
-				(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
-				ev->u.send_pdu.data_size);
-			mutex_unlock(&conn_mutex);
-		}
-		else
-			err = -EINVAL;
+		err = iscsi_if_transport_conn(transport, nlh);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -3991,10 +4114,11 @@ static ssize_t show_conn_state(struct device *dev,
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
+	int conn_state = READ_ONCE(conn->state);
-	if (conn->state >= 0 &&
-	    conn->state < ARRAY_SIZE(connection_state_names))
-		state = connection_state_names[conn->state];
+	if (conn_state >= 0 &&
+	    conn_state < ARRAY_SIZE(connection_state_names))
+		state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
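
show_conn_state() above snapshots conn->state exactly once, so the bounds check and the array index cannot observe two different values of a field that other contexts now update concurrently (hence the WRITE_ONCE() stores added elsewhere in this diff). A userspace illustration of the same snapshot-then-check shape (volatile stands in for READ_ONCE; the state names are illustrative, not the kernel's table):

#include <stdio.h>

static const char *connection_state_names[] = {
	"free", "up", "down", "failed", "bound"
};
static volatile int shared_state;	/* written by other threads */

static const char *state_name(void)
{
	int s = shared_state;	/* single snapshot, like READ_ONCE() */

	if (s >= 0 && s < (int)(sizeof(connection_state_names) /
				sizeof(connection_state_names[0])))
		return connection_state_names[s];
	return "unknown";
}

int main(void)
{
	printf("%s\n", state_name());
	return 0;
}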
@@ -4649,6 +4773,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
int err;
BUG_ON(!tt);
WARN_ON(tt->ep_disconnect && !tt->unbind_conn);
priv = iscsi_if_transport_lookup(tt);
if (priv)
@@ -4803,10 +4928,10 @@ static __init int iscsi_transport_init(void)
goto release_nls;
}
-	iscsi_destroy_workq = alloc_workqueue("%s",
-			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
-			1, "iscsi_destroy");
-	if (!iscsi_destroy_workq) {
+	iscsi_conn_cleanup_workq = alloc_workqueue("%s",
+			WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+			"iscsi_conn_cleanup");
+	if (!iscsi_conn_cleanup_workq) {
err = -ENOMEM;
goto destroy_wq;
}
@@ -4836,7 +4961,7 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
-	destroy_workqueue(iscsi_destroy_workq);
+	destroy_workqueue(iscsi_conn_cleanup_workq);
destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);

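Much of the scsi_transport_iscsi.c rework above hangs on ISCSI_CLS_CONN_BIT_CLEANUP: the kernel error path and the userspace-driven stop path both try to claim it with test_and_set_bit(), the winner runs the cleanup, and the loser just flushes the winner's work. A compact userspace analogue of that claim-or-wait handoff, using a C11 atomic flag in place of the kernel bitops (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag cleanup_claimed = ATOMIC_FLAG_INIT;

static void stop_conn(const char *who)
{
	if (!atomic_flag_test_and_set(&cleanup_claimed)) {
		/* winner: like the !test_and_set_bit() branch above */
		printf("%s: running conn cleanup\n", who);
	} else {
		/* loser: would flush_work() the winner's cleanup here */
		printf("%s: cleanup already claimed, waiting for it\n", who);
	}
}

int main(void)
{
	stop_conn("kernel error path");	/* e.g. iscsi_conn_error_event() */
	stop_conn("userspace iscsid");	/* e.g. iscsi_if_stop_conn() */
	return 0;
}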

@@ -1676,6 +1676,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
mutex_lock(&udev->cmdr_lock);
page = tcmu_get_block_page(udev, dbi);
if (likely(page)) {
get_page(page);
mutex_unlock(&udev->cmdr_lock);
return page;
}
@@ -1714,6 +1715,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
/* For the vmalloc()ed cmd area pages */
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
get_page(page);
} else {
uint32_t dbi;
@@ -1724,7 +1726,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-	get_page(page);
vmf->page = page;
return 0;
}

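The tcmu hunks above move get_page() to before cmdr_lock is dropped: the lock is what keeps the looked-up page from being released, so the extra reference has to be taken while it is still held. The general shape, as a runnable userspace analogue (a mutex plus refcount standing in for cmdr_lock and the page refcount):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page { int refs; };

static pthread_mutex_t cmdr_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *table_slot;		/* protected by cmdr_lock */

static struct page *try_get_page(void)
{
	struct page *p;

	pthread_mutex_lock(&cmdr_lock);
	p = table_slot;
	if (p)
		p->refs++;	/* like get_page(): taken before unlock */
	pthread_mutex_unlock(&cmdr_lock);
	/* p cannot be freed now even if the slot is torn down */
	return p;
}

int main(void)
{
	table_slot = calloc(1, sizeof(*table_slot));
	table_slot->refs = 1;
	struct page *p = try_get_page();
	printf("refs=%d\n", p ? p->refs : 0);	/* refs=2 */
	return 0;
}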

@@ -2570,7 +2570,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_path *path = NULL;
LIST_HEAD(dirty);
struct list_head *io = &cur_trans->io_bgs;
-	int num_started = 0;
int loops = 0;
spin_lock(&cur_trans->dirty_bgs_lock);
@@ -2636,7 +2635,6 @@ again:
cache->io_ctl.inode = NULL;
ret = btrfs_write_out_cache(trans, cache, path);
if (ret == 0 && cache->io_ctl.inode) {
-			num_started++;
should_put = 0;
/*
@@ -2737,7 +2735,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
int should_put;
struct btrfs_path *path;
struct list_head *io = &cur_trans->io_bgs;
-	int num_started = 0;
path = btrfs_alloc_path();
if (!path)
@@ -2795,7 +2792,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
cache->io_ctl.inode = NULL;
ret = btrfs_write_out_cache(trans, cache, path);
if (ret == 0 && cache->io_ctl.inode) {
-			num_started++;
should_put = 0;
list_add_tail(&cache->io_list, io);
} else {


@@ -1596,9 +1596,10 @@ again:
ret = btrfs_insert_fs_root(fs_info, root);
if (ret) {
-		btrfs_put_root(root);
-		if (ret == -EEXIST)
+		if (ret == -EEXIST) {
+			btrfs_put_root(root);
 			goto again;
+		}
goto fail;
}
return root;


@@ -2833,8 +2833,9 @@ out:
return ret;
}
-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 {
+	struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
@@ -2866,6 +2867,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_only_mutex;
}
ret = file_modified(file);
if (ret)
goto out_only_mutex;
lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
lockend = round_down(offset + len,
btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
@@ -3301,7 +3306,7 @@ static long btrfs_fallocate(struct file *file, int mode,
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE)
-		return btrfs_punch_hole(inode, offset, len);
+		return btrfs_punch_hole(file, offset, len);
/*
* Only trigger disk allocation, don't trigger qgroup reserve
@@ -3323,6 +3328,10 @@ static long btrfs_fallocate(struct file *file, int mode,
goto out;
}
ret = file_modified(file);
if (ret)
goto out;
/*
* TODO: Move these two operations after we have checked
* accurate reserved space, or fallocate can still fail but


@@ -995,7 +995,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
int ret = 0;
if (btrfs_is_free_space_inode(inode)) {
-		WARN_ON_ONCE(1);
ret = -EINVAL;
goto out_unlock;
}


@@ -4219,10 +4219,12 @@ static int balance_kthread(void *data)
struct btrfs_fs_info *fs_info = data;
int ret = 0;
sb_start_write(fs_info->sb);
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl)
ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
mutex_unlock(&fs_info->balance_mutex);
sb_end_write(fs_info->sb);
return ret;
}


@@ -97,6 +97,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
if (rc != 1)
return -EINVAL;
if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
return -EINVAL;
rc = symlink_hash(link_len, link_str, md5_hash);
if (rc) {
cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);


@@ -563,10 +563,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
-		if (_sz == PMD_SIZE)				\
-			tlb_flush_pmd_range(tlb, address, _sz);	\
-		else if (_sz == PUD_SIZE)			\
+		if (_sz >= P4D_SIZE)				\
+			tlb_flush_p4d_range(tlb, address, _sz);	\
+		else if (_sz >= PUD_SIZE)			\
 			tlb_flush_pud_range(tlb, address, _sz);	\
+		else if (_sz >= PMD_SIZE)			\
+			tlb_flush_pmd_range(tlb, address, _sz);	\
+		else						\
+			tlb_flush_pte_range(tlb, address, _sz);	\
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)

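The tlb.h hunk replaces exact size matches with descending ">=" checks, so a huge page whose size falls between two page-table levels (for example a contiguous-PTE or contiguous-PMD size on some architectures) still flushes at the level that covers it instead of falling through. A small userspace model of the dispatch (the span constants are illustrative x86-64-style values, not the macro's):

#include <stdio.h>

#define PMD_SPAN (1UL << 21)	/* illustrative: 2M */
#define PUD_SPAN (1UL << 30)	/* illustrative: 1G */
#define P4D_SPAN (1UL << 39)	/* illustrative: 512G */

static const char *flush_level(unsigned long sz)
{
	if (sz >= P4D_SPAN)
		return "p4d";
	if (sz >= PUD_SPAN)
		return "pud";
	if (sz >= PMD_SPAN)
		return "pmd";
	return "pte";
}

int main(void)
{
	/* a 16M hugepage: '==' against 2M/1G matched nothing,
	 * '>=' correctly picks the pmd-level flush */
	printf("%s\n", flush_level(1UL << 24));
	return 0;
}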

@@ -236,6 +236,7 @@ typedef struct ax25_dev {
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_dama_info dama;
#endif
refcount_t refcount;
} ax25_dev;
typedef struct ax25_cb {
@@ -290,6 +291,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
}
}
static inline void ax25_dev_hold(ax25_dev *ax25_dev)
{
refcount_inc(&ax25_dev->refcount);
}
static inline void ax25_dev_put(ax25_dev *ax25_dev)
{
if (refcount_dec_and_test(&ax25_dev->refcount)) {
kfree(ax25_dev);
}
}
static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;

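The ax25_dev_hold()/ax25_dev_put() pair added above is the standard last-reference-frees pattern: every holder takes a reference, and the kfree() happens only when the final one drops. A runnable userspace analogue with C11 atomics in place of refcount_t (names illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dev { atomic_int refcount; };

static void dev_hold(struct dev *d)
{
	atomic_fetch_add(&d->refcount, 1);
}

static void dev_put(struct dev *d)
{
	/* free only on the transition to zero, like refcount_dec_and_test() */
	if (atomic_fetch_sub(&d->refcount, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(d);
	}
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	atomic_init(&d->refcount, 1);	/* creator's reference */
	dev_hold(d);			/* a second holder */
	dev_put(d);			/* still alive */
	dev_put(d);			/* now freed */
	return 0;
}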

@@ -59,6 +59,8 @@ struct flow_dissector_key_vlan {
__be16 vlan_tci;
};
__be16 vlan_tpid;
__be16 vlan_eth_type;
u16 padding;
};
struct flow_dissector_mpls_lse {


@@ -421,6 +421,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *);
extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
int);
extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active);
extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
extern void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err);


@@ -82,6 +82,7 @@ struct iscsi_transport {
void (*destroy_session) (struct iscsi_cls_session *session);
struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
uint32_t cid);
void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active);
int (*bind_conn) (struct iscsi_cls_session *session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_eph, int is_leading);
@@ -196,15 +197,25 @@ enum iscsi_connection_state {
ISCSI_CONN_BOUND,
};
#define ISCSI_CLS_CONN_BIT_CLEANUP 1
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
-	struct list_head conn_list_err;	/* item in connlist_err */
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
uint32_t cid; /* connection id */
/*
* This protects the conn startup and binding/unbinding of the ep to
* the conn. Unbinding includes ep_disconnect and stop_conn.
*/
struct mutex ep_mutex;
struct iscsi_endpoint *ep;
/* Used when accessing flags and queueing work. */
spinlock_t lock;
unsigned long flags;
struct work_struct cleanup_work;
struct device dev; /* sysfs transport/container device */
enum iscsi_connection_state state;
};
@@ -443,6 +454,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
extern void iscsi_put_endpoint(struct iscsi_endpoint *ep);
extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
struct iscsi_transport *t,


@@ -1874,17 +1874,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_STRUCT__entry(
__field(const void *, dr)
__field(u32, xid)
-		__string(addr, dr->xprt->xpt_remotebuf)
+		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
),
TP_fast_assign(
__entry->dr = dr;
__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
(dr->xprt_hlen>>2)));
-		__assign_str(addr, dr->xprt->xpt_remotebuf);
+		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
+			 "%pISpc", (struct sockaddr *)&dr->addr);
),
-	TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+	TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr,
__entry->xid)
);


@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
+		swiotlb_tbl_unmap_single(dev, phys, size, size, dir,
+					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */


@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
-			cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
-				   node_to_cpumask[n]);
+			/* Ensure that only CPUs which are in both masks are set */
+			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+			cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
if (++curvec == last_affv)
curvec = firstvec;
}

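The affinity hunk's one-line idea: a vector's mask must only ever gain CPUs that are in both the allowed-CPU mask and the node's mask, so the node mask is ANDed into the scratch mask (nmsk) first and only then ORed in. With plain bitmasks standing in for struct cpumask:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_mask    = 0x0f;	/* CPUs this spread may use */
	uint64_t node_cpus   = 0x3c;	/* CPUs sitting on the NUMA node */
	uint64_t vector_mask = 0;
	uint64_t nmsk;

	/* the fix: intersect first, like cpumask_and(nmsk, cpu_mask, ...) */
	nmsk = cpu_mask & node_cpus;
	vector_mask |= nmsk;

	/* 0x0c: CPUs 2-3 only; ORing node_cpus directly would leak CPUs 4-5 */
	printf("%" PRIx64 "\n", vector_mask);
	return 0;
}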

@@ -347,7 +347,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
/* There shouldn't be any pending callbacks on an offline CPU. */
if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
-		     !warned && !llist_empty(head))) {
+		     !warned && entry != NULL)) {
warned = true;
WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
