Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accessibility/speakup/spk_ttyio.c | 12
-rw-r--r--  drivers/acpi/apei/apei-base.c | 4
-rw-r--r--  drivers/acpi/fan.c | 1
-rw-r--r--  drivers/atm/nicstar.c | 2
-rw-r--r--  drivers/counter/ti-eqep.c | 4
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 33
-rw-r--r--  drivers/cpuidle/cpuidle-tegra.c | 4
-rw-r--r--  drivers/dax/Kconfig | 1
-rw-r--r--  drivers/dma/dmaengine.c | 17
-rw-r--r--  drivers/dma/idxd/device.c | 31
-rw-r--r--  drivers/dma/idxd/idxd.h | 3
-rw-r--r--  drivers/dma/idxd/init.c | 5
-rw-r--r--  drivers/dma/idxd/registers.h | 25
-rw-r--r--  drivers/dma/idxd/submit.c | 2
-rw-r--r--  drivers/dma/ioat/dca.c | 10
-rw-r--r--  drivers/dma/pl330.c | 2
-rw-r--r--  drivers/dma/ti/k3-udma-private.c | 2
-rw-r--r--  drivers/dma/ti/omap-dma.c | 37
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 13
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 8
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 8
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 1
-rw-r--r--  drivers/hid/hid-cypress.c | 44
-rw-r--r--  drivers/hid/hid-ids.h | 9
-rw-r--r--  drivers/hid/hid-input.c | 3
-rw-r--r--  drivers/hid/hid-ite.c | 61
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 22
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 32
-rw-r--r--  drivers/hid/hid-mcp2221.c | 48
-rw-r--r--  drivers/hid/hid-quirks.c | 5
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 3
-rw-r--r--  drivers/hid/hid-uclogic-core.c | 2
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 9
-rw-r--r--  drivers/hv/hv.c | 8
-rw-r--r--  drivers/iio/accel/kxcjk-1013.c | 51
-rw-r--r--  drivers/iio/adc/ingenic-adc.c | 34
-rw-r--r--  drivers/iio/adc/mt6577_auxadc.c | 6
-rw-r--r--  drivers/iio/adc/stm32-adc-core.c | 41
-rw-r--r--  drivers/iio/adc/stm32-adc.c | 50
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 16
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c | 6
-rw-r--r--  drivers/iio/light/Kconfig | 1
-rw-r--r--  drivers/infiniband/Kconfig | 3
-rw-r--r--  drivers/infiniband/core/cm.c | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 3
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/Kconfig | 3
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig | 2
-rw-r--r--  drivers/infiniband/sw/siw/Kconfig | 1
-rw-r--r--  drivers/input/keyboard/sunkbd.c | 41
-rw-r--r--  drivers/input/misc/adxl34x.c | 2
-rw-r--r--  drivers/input/mouse/elan_i2c.h | 2
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 3
-rw-r--r--  drivers/input/mouse/elan_i2c_i2c.c | 10
-rw-r--r--  drivers/input/mouse/elan_i2c_smbus.c | 2
-rw-r--r--  drivers/input/serio/i8042.c | 12
-rw-r--r--  drivers/input/touchscreen/Kconfig | 1
-rw-r--r--  drivers/iommu/intel/dmar.c | 4
-rw-r--r--  drivers/iommu/intel/iommu.c | 5
-rw-r--r--  drivers/mmc/host/sdhci-of-arasan.c | 51
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 13
-rw-r--r--  drivers/net/can/dev.c | 2
-rw-r--r--  drivers/net/can/flexcan.c | 28
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 4
-rw-r--r--  drivers/net/can/m_can/Kconfig | 3
-rw-r--r--  drivers/net/can/m_can/m_can.c | 18
-rw-r--r--  drivers/net/can/m_can/m_can.h | 1
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 23
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.c | 32
-rw-r--r--  drivers/net/can/ti_hecc.c | 13
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 2
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 4
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 31
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 59
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 62
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 115
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_pci.c | 7
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 9
-rw-r--r--  drivers/net/geneve.c | 3
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 15
-rw-r--r--  drivers/net/netdevsim/dev.c | 2
-rw-r--r--  drivers/net/netdevsim/health.c | 1
-rw-r--r--  drivers/net/netdevsim/udp_tunnels.c | 1
-rw-r--r--  drivers/net/phy/mscc/mscc_macsec.c | 1
-rw-r--r--  drivers/net/phy/smsc.c | 4
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 3
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 25
-rw-r--r--  drivers/nvme/host/nvme.h | 6
-rw-r--r--  drivers/nvme/host/pci.c | 15
-rw-r--r--  drivers/regulator/core.c | 43
-rw-r--r--  drivers/regulator/pfuze100-regulator.c | 13
-rw-r--r--  drivers/regulator/ti-abb-regulator.c | 12
-rw-r--r--  drivers/s390/block/dasd.c | 6
-rw-r--r--  drivers/scsi/libiscsi.c | 23
-rw-r--r--  drivers/spi/spi-bcm-qspi.c | 34
-rw-r--r--  drivers/spi/spi-bcm2835.c | 24
-rw-r--r--  drivers/spi/spi-bcm2835aux.c | 20
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c | 2
-rw-r--r--  drivers/spi/spi-dw-core.c | 4
-rw-r--r--  drivers/spi/spi-fsi.c | 2
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c | 3
-rw-r--r--  drivers/spi/spi-npcm-fiu.c | 2
-rw-r--r--  drivers/spi/spi.c | 81
-rw-r--r--  drivers/staging/mt7621-pci/pci-mt7621.c | 15
-rw-r--r--  drivers/staging/ralink-gdma/Kconfig | 1
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/sdio_intf.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 17
-rw-r--r--  drivers/tee/amdtee/amdtee_private.h | 8
-rw-r--r--  drivers/tee/amdtee/core.c | 26
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-bandgap.c | 18
-rw-r--r--  drivers/tty/serial/ar933x_uart.c | 6
-rw-r--r--  drivers/tty/serial/imx.c | 30
-rw-r--r--  drivers/vdpa/Kconfig | 1
-rw-r--r--  drivers/vhost/scsi.c | 397
-rw-r--r--  drivers/vhost/vhost.c | 6
-rw-r--r--  drivers/vhost/vhost.h | 1
-rw-r--r--  drivers/video/fbdev/hyperv_fb.c | 7
175 files changed, 1777 insertions, 728 deletions
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index ecc39983e946..669392f31d4e 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -49,15 +49,25 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
if (!tty->ops->write)
return -EOPNOTSUPP;
+
+ mutex_lock(&speakup_tty_mutex);
+ if (speakup_tty) {
+ mutex_unlock(&speakup_tty_mutex);
+ return -EBUSY;
+ }
speakup_tty = tty;
ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
- if (!ldisc_data)
+ if (!ldisc_data) {
+ speakup_tty = NULL;
+ mutex_unlock(&speakup_tty_mutex);
return -ENOMEM;
+ }
init_completion(&ldisc_data->completion);
ldisc_data->buf_free = true;
speakup_tty->disc_data = ldisc_data;
+ mutex_unlock(&speakup_tty_mutex);
return 0;
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 552fd9ffaca4..3294cc8dc073 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -633,6 +633,10 @@ int apei_map_generic_address(struct acpi_generic_address *reg)
if (rc)
return rc;
+ /* IO space doesn't need mapping */
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ return 0;
+
if (!acpi_os_map_generic_address(reg))
return -ENXIO;
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 48354f82fba6..66c3983f0ccc 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -352,6 +352,7 @@ static int acpi_fan_get_fps(struct acpi_device *device)
struct acpi_fan_fps *fps = &fan->fps[i];
snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+ sysfs_attr_init(&fps->dev_attr.attr);
fps->dev_attr.show = show_state;
fps->dev_attr.store = NULL;
fps->dev_attr.attr.name = fps->name;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 7af74fb450a0..09ad73361879 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -1706,6 +1706,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
atomic_inc(&vcc->stats->tx_err);
+ dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
return -EIO;
}
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index e27771df8e23..a60aee1a1a29 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -368,7 +368,7 @@ static const struct regmap_config ti_eqep_regmap32_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = 0x24,
+ .max_register = QUPRD,
};
static const struct regmap_config ti_eqep_regmap16_config = {
@@ -376,7 +376,7 @@ static const struct regmap_config ti_eqep_regmap16_config = {
.reg_bits = 16,
.val_bits = 16,
.reg_stride = 2,
- .max_register = 0x1e,
+ .max_register = QCPRDLAT,
};
static int ti_eqep_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index e855e8612a67..78318508a6d6 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
@@ -228,12 +229,17 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
+ struct device *dev = &sdev->dev;
handle = sdev->handle;
if (!handle || !handle->perf_ops)
return -ENODEV;
+ /* dummy clock provider as needed by OPP if clocks property is used */
+ if (of_find_property(dev->of_node, "#clock-cells", NULL))
+ devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+
ret = cpufreq_register_driver(&scmi_cpufreq_driver);
if (ret) {
dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 4b4079f51559..7eb2c56c65de 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -42,6 +42,8 @@ static const struct tegra186_cpufreq_cluster_info tegra186_clusters[] = {
struct tegra186_cpufreq_cluster {
const struct tegra186_cpufreq_cluster_info *info;
struct cpufreq_frequency_table *table;
+ u32 ref_clk_khz;
+ u32 div;
};
struct tegra186_cpufreq_data {
@@ -94,7 +96,7 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_frequency_table *tbl;
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_policy *policy;
void __iomem *edvd_reg;
unsigned int i, freq = 0;
@@ -104,17 +106,23 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
if (!policy)
return 0;
- tbl = policy->freq_table;
edvd_reg = policy->driver_data;
ndiv = readl(edvd_reg) & EDVD_CORE_VOLT_FREQ_F_MASK;
- for (i = 0; tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
- if ((tbl[i].driver_data & EDVD_CORE_VOLT_FREQ_F_MASK) == ndiv) {
- freq = tbl[i].frequency;
- break;
+ for (i = 0; i < data->num_clusters; i++) {
+ struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
+ int core;
+
+ for (core = 0; core < ARRAY_SIZE(cluster->info->cpus); core++) {
+ if (cluster->info->cpus[core] != policy->cpu)
+ continue;
+
+ freq = (cluster->ref_clk_khz * ndiv) / cluster->div;
+ goto out;
}
}
+out:
cpufreq_cpu_put(policy);
return freq;
@@ -133,7 +141,7 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
static struct cpufreq_frequency_table *init_vhint_table(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
- unsigned int cluster_id)
+ struct tegra186_cpufreq_cluster *cluster)
{
struct cpufreq_frequency_table *table;
struct mrq_cpu_vhint_request req;
@@ -152,7 +160,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
memset(&req, 0, sizeof(req));
req.addr = phys;
- req.cluster_id = cluster_id;
+ req.cluster_id = cluster->info->bpmp_cluster_id;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_CPU_VHINT;
@@ -185,6 +193,9 @@ static struct cpufreq_frequency_table *init_vhint_table(
goto free;
}
+ cluster->ref_clk_khz = data->ref_clk_hz / 1000;
+ cluster->div = data->pdiv * data->mdiv;
+
for (i = data->vfloor, j = 0; i <= data->vceil; i++) {
struct cpufreq_frequency_table *point;
u16 ndiv = data->ndiv[i];
@@ -202,8 +213,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
point = &table[j++];
point->driver_data = edvd_val;
- point->frequency = data->ref_clk_hz * ndiv / data->pdiv /
- data->mdiv / 1000;
+ point->frequency = (cluster->ref_clk_khz * ndiv) / cluster->div;
}
table[j].frequency = CPUFREQ_TABLE_END;
@@ -245,8 +255,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
cluster->info = &tegra186_clusters[i];
- cluster->table = init_vhint_table(
- pdev, bpmp, cluster->info->bpmp_cluster_id);
+ cluster->table = init_vhint_table(pdev, bpmp, cluster);
if (IS_ERR(cluster->table)) {
err = PTR_ERR(cluster->table);
goto put_bpmp;
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index e8956706a291..191966dc8d02 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -189,7 +189,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
}
local_fiq_disable();
- tegra_pm_set_cpu_in_lp2();
+ RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
cpu_pm_enter();
switch (index) {
@@ -207,7 +207,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
}
cpu_pm_exit();
- tegra_pm_clear_cpu_in_lp2();
+ RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
local_fiq_enable();
return err ?: index;
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 567428e10b7b..d2834c2cfa10 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -50,7 +50,6 @@ config DEV_DAX_HMEM
Say M if unsure.
config DEV_DAX_HMEM_DEVICES
- depends on NUMA_KEEP_MEMINFO # for phys_to_target_node()
depends on DEV_DAX_HMEM && DAX=y
def_bool y
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 7974fa0400d8..962cbb5e5f7f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1039,16 +1039,15 @@ static int get_dma_id(struct dma_device *device)
static int __dma_async_device_channel_register(struct dma_device *device,
struct dma_chan *chan)
{
- int rc = 0;
+ int rc;
chan->local = alloc_percpu(typeof(*chan->local));
if (!chan->local)
- goto err_out;
+ return -ENOMEM;
chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
if (!chan->dev) {
- free_percpu(chan->local);
- chan->local = NULL;
- goto err_out;
+ rc = -ENOMEM;
+ goto err_free_local;
}
/*
@@ -1061,7 +1060,8 @@ static int __dma_async_device_channel_register(struct dma_device *device,
if (chan->chan_id < 0) {
pr_err("%s: unable to alloc ida for chan: %d\n",
__func__, chan->chan_id);
- goto err_out;
+ rc = chan->chan_id;
+ goto err_free_dev;
}
chan->dev->device.class = &dma_devclass;
@@ -1082,9 +1082,10 @@ static int __dma_async_device_channel_register(struct dma_device *device,
mutex_lock(&device->chan_mutex);
ida_free(&device->chan_ida, chan->chan_id);
mutex_unlock(&device->chan_mutex);
- err_out:
- free_percpu(chan->local);
+ err_free_dev:
kfree(chan->dev);
+ err_free_local:
+ free_percpu(chan->local);
return rc;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 200b9109cacf..663344987e3f 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -271,7 +271,7 @@ int idxd_wq_map_portal(struct idxd_wq *wq)
resource_size_t start;
start = pci_resource_start(pdev, IDXD_WQ_BAR);
- start = start + wq->id * IDXD_PORTAL_SIZE;
+ start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
if (!wq->dportal)
@@ -295,7 +295,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
int i, wq_offset;
lockdep_assert_held(&idxd->dev_lock);
- memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
+ memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
wq->size = 0;
wq->group = NULL;
@@ -304,8 +304,8 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
- for (i = 0; i < 8; i++) {
- wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+ wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
iowrite32(0, idxd->reg_base + wq_offset);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
wq->id, i, wq_offset,
@@ -539,10 +539,10 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (!wq->group)
return 0;
- memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+ memset(wq->wqcfg, 0, idxd->wqcfg_size);
/* byte 0-3 */
- wq->wqcfg.wq_size = wq->size;
+ wq->wqcfg->wq_size = wq->size;
if (wq->size == 0) {
dev_warn(dev, "Incorrect work queue size: 0\n");
@@ -550,22 +550,21 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
}
/* bytes 4-7 */
- wq->wqcfg.wq_thresh = wq->threshold;
+ wq->wqcfg->wq_thresh = wq->threshold;
/* byte 8-11 */
- wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
- wq->wqcfg.mode = 1;
-
- wq->wqcfg.priority = wq->priority;
+ wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ wq->wqcfg->mode = 1;
+ wq->wqcfg->priority = wq->priority;
/* bytes 12-15 */
- wq->wqcfg.max_xfer_shift = ilog2(wq->max_xfer_bytes);
- wq->wqcfg.max_batch_shift = ilog2(wq->max_batch_size);
+ wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
+ wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
dev_dbg(dev, "WQ %d CFGs\n", wq->id);
- for (i = 0; i < 8; i++) {
- wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
- iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+ wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+ iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
wq->id, i, wq_offset,
ioread32(idxd->reg_base + wq_offset));
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index c64df197e724..d48f193daacc 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -103,7 +103,7 @@ struct idxd_wq {
u32 priority;
enum idxd_wq_state state;
unsigned long flags;
- union wqcfg wqcfg;
+ union wqcfg *wqcfg;
u32 vec_ptr; /* interrupt steering */
struct dsa_hw_desc **hw_descs;
int num_descs;
@@ -183,6 +183,7 @@ struct idxd_device {
int max_wq_size;
int token_limit;
int nr_tokens; /* non-reserved tokens */
+ unsigned int wqcfg_size;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 11e5ce168177..0a4432b063b5 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -178,6 +178,9 @@ static int idxd_setup_internals(struct idxd_device *idxd)
wq->idxd_cdev.minor = -1;
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
+ wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
+ if (!wq->wqcfg)
+ return -ENOMEM;
}
for (i = 0; i < idxd->max_engines; i++) {
@@ -251,6 +254,8 @@ static void idxd_read_caps(struct idxd_device *idxd)
dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+ idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
+ dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
/* reading operation capabilities */
for (i = 0; i < 4; i++) {
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index a39e7ae6b3d9..54390334c243 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -8,7 +8,7 @@
#define IDXD_MMIO_BAR 0
#define IDXD_WQ_BAR 2
-#define IDXD_PORTAL_SIZE 0x4000
+#define IDXD_PORTAL_SIZE PAGE_SIZE
/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET 0x00
@@ -43,7 +43,8 @@ union wq_cap_reg {
struct {
u64 total_wq_size:16;
u64 num_wqs:8;
- u64 rsvd:24;
+ u64 wqcfg_size:4;
+ u64 rsvd:20;
u64 shared_mode:1;
u64 dedicated_mode:1;
u64 rsvd2:1;
@@ -55,6 +56,7 @@ union wq_cap_reg {
u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET 0x20
+#define IDXD_WQCFG_MIN 5
union group_cap_reg {
struct {
@@ -333,4 +335,23 @@ union wqcfg {
};
u32 bits[8];
} __packed;
+
+/*
+ * This macro calculates the offset into the WQCFG register
+ * idxd - struct idxd *
+ * n - wq id
+ * ofs - the index of the 32b dword for the config register
+ *
+ * The WQCFG register block is divided into groups per each wq. The n index
+ * allows us to move to the register group that's for that particular wq.
+ * Each register is 32bits. The ofs gives us the number of register to access.
+ */
+#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
+({\
+ typeof(_idxd_dev) __idxd_dev = (_idxd_dev); \
+ (__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs); \
+})
+
+#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
+
#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 156a1ee233aa..417048e3c42a 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -74,7 +74,7 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
- portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+ portal = wq->dportal;
/*
* The wmb() flushes writes to coherent DMA data before possibly
* triggering a DMA read. The wmb() is necessary even on UP because
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 0be385587c4c..289c59ed74b9 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -40,16 +40,6 @@
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82
-/* verify if tag map matches expected values */
-static inline int dca2_tag_map_valid(u8 *tag_map)
-{
- return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
- (tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
- (tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
- (tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
- (tag_map[4] == DCA2_TAG_MAP_BYTE4));
-}
-
/*
* "Legacy" DCA systems do not implement the DCA register set in the
* I/OAT device. Software needs direct support for their tag mappings.
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index e9f0101d92fa..0f5c19370f6d 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2799,7 +2799,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
* If burst size is smaller than bus width then make sure we only
* transfer one at a time to avoid a burst stradling an MFIFO entry.
*/
- if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
+ if (burst * 8 < pl330->pcfg.data_bus_width)
desc->rqcfg.brst_len = 1;
desc->bytes_requested = len;
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index aa24e554f7b4..8563a392f30b 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(xudma_rflow_is_gp);
#define XUDMA_GET_PUT_RESOURCE(res) \
struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \
{ \
- return __udma_reserve_##res(ud, false, id); \
+ return __udma_reserve_##res(ud, UDMA_TP_NORMAL, id); \
} \
EXPORT_SYMBOL(xudma_##res##_get); \
\
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index c9fe5e3a6b55..268a08058714 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1522,29 +1522,38 @@ static void omap_dma_free(struct omap_dmadev *od)
}
}
+/* Currently used by omap2 & 3 to block deeper SoC idle states */
+static bool omap_dma_busy(struct omap_dmadev *od)
+{
+ struct omap_chan *c;
+ int lch = -1;
+
+ while (1) {
+ lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
+ if (lch >= od->lch_count)
+ break;
+ c = od->lch_map[lch];
+ if (!c)
+ continue;
+ if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
+ return true;
+ }
+
+ return false;
+}
+
/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
static int omap_dma_busy_notifier(struct notifier_block *nb,
unsigned long cmd, void *v)
{
struct omap_dmadev *od;
- struct omap_chan *c;
- int lch = -1;
od = container_of(nb, struct omap_dmadev, nb);
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
- while (1) {
- lch = find_next_bit(od->lch_bitmap, od->lch_count,
- lch + 1);
- if (lch >= od->lch_count)
- break;
- c = od->lch_map[lch];
- if (!c)
- continue;
- if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
- return NOTIFY_BAD;
- }
+ if (omap_dma_busy(od))
+ return NOTIFY_BAD;
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
@@ -1595,6 +1604,8 @@ static int omap_dma_context_notifier(struct notifier_block *nb,
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
+ if (omap_dma_busy(od))
+ return NOTIFY_BAD;
omap_dma_context_save(od);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index ecff35402860..22faea653ea8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -517,8 +517,8 @@ struct xilinx_dma_device {
#define to_dma_tx_descriptor(tx) \
container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
- cond, delay_us, timeout_us)
+ readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
+ val, cond, delay_us, timeout_us)
/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
@@ -948,8 +948,10 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
{
struct xilinx_cdma_tx_segment *cdma_seg;
struct xilinx_axidma_tx_segment *axidma_seg;
+ struct xilinx_aximcdma_tx_segment *aximcdma_seg;
struct xilinx_cdma_desc_hw *cdma_hw;
struct xilinx_axidma_desc_hw *axidma_hw;
+ struct xilinx_aximcdma_desc_hw *aximcdma_hw;
struct list_head *entry;
u32 residue = 0;
@@ -961,13 +963,23 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
cdma_hw = &cdma_seg->hw;
residue += (cdma_hw->control - cdma_hw->status) &
chan->xdev->max_buffer_len;
- } else {
+ } else if (chan->xdev->dma_config->dmatype ==
+ XDMA_TYPE_AXIDMA) {
axidma_seg = list_entry(entry,
struct xilinx_axidma_tx_segment,
node);
axidma_hw = &axidma_seg->hw;
residue += (axidma_hw->control - axidma_hw->status) &
chan->xdev->max_buffer_len;
+ } else {
+ aximcdma_seg =
+ list_entry(entry,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ aximcdma_hw = &aximcdma_seg->hw;
+ residue +=
+ (aximcdma_hw->control - aximcdma_hw->status) &
+ chan->xdev->max_buffer_len;
}
}
@@ -1135,7 +1147,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
((i + 1) % XILINX_DMA_NUM_DESCS));
chan->seg_mv[i].phys = chan->seg_p +
- sizeof(*chan->seg_v) * i;
+ sizeof(*chan->seg_mv) * i;
list_add_tail(&chan->seg_mv[i].node,
&chan->free_seg_list);
}
@@ -1560,7 +1572,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_axidma_tx_segment *tail_segment;
+ struct xilinx_aximcdma_tx_segment *tail_segment;
u32 reg;
/*
@@ -1582,7 +1594,7 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
tail_desc = list_last_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_axidma_tx_segment, node);
+ struct xilinx_aximcdma_tx_segment, node);
reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
@@ -1864,6 +1876,7 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
struct xilinx_vdma_tx_segment *tail_segment;
struct xilinx_dma_tx_descriptor *tail_desc;
struct xilinx_axidma_tx_segment *axidma_tail_segment;
+ struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
struct xilinx_cdma_tx_segment *cdma_tail_segment;
if (list_empty(&chan->pending_list))
@@ -1885,11 +1898,17 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
struct xilinx_cdma_tx_segment,
node);
cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- } else {
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
axidma_tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_axidma_tx_segment,
node);
axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else {
+ aximcdma_tail_segment =
+ list_last_entry(&tail_desc->segments,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
}
/*
@@ -2836,10 +2855,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->stop_transfer = xilinx_dma_stop_transfer;
}
- /* check if SG is enabled (only for AXIDMA and CDMA) */
+ /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
- if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_SG_MASK)
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_SG_MASK)
chan->has_sg = true;
dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
chan->has_sg ? "enabled" : "disabled");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 42d9748921f5..8e988f07f085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1055,10 +1055,10 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
/* Arcturus */
- {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
/* Navi10 */
{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e93e18c06c0e..0e7118000919 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7506,7 +7506,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
bool mode_set_reset_required = false;
drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_atomic_helper_calc_timestamping_constants(state);
dm_state = dm_atomic_get_new_state(state);
if (dm_state && dm_state->context) {
@@ -7533,6 +7532,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
}
+ drm_atomic_helper_calc_timestamping_constants(state);
+
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 2a1fea501f8c..3f1e7a196a23 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -299,8 +299,8 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
- [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 748df1cacd2b..0c79a9ba48bb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2327,12 +2327,6 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
{
enum drm_connector_status result;
- mutex_lock(&hdmi->mutex);
- hdmi->force = DRM_FORCE_UNSPECIFIED;
- dw_hdmi_update_power(hdmi);
- dw_hdmi_update_phy_mask(hdmi);
- mutex_unlock(&hdmi->mutex);
-
result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
mutex_lock(&hdmi->mutex);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 50cad0e4a92e..375c79e23ca5 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -140,7 +140,7 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
unsigned int c = 0;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
- pl_flag = TTM_PL_FLAG_TOPDOWN;
+ invariant_flags = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 31337d2a2cde..99e682563d47 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -12878,10 +12878,11 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
case 10 ... 11:
bpp = 10 * 3;
break;
- case 12:
+ case 12 ... 16:
bpp = 12 * 3;
break;
default:
+ MISSING_CASE(conn_state->max_bpc);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index f82c6dd1de18..9bb16bdf93cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -5457,6 +5457,7 @@ static void virtual_context_destroy(struct kref *kref)
__execlists_context_fini(&ve->context);
intel_context_fini(&ve->context);
+ intel_breadcrumbs_free(ve->base.breadcrumbs);
intel_engine_free_request_pool(&ve->base);
kfree(ve->bonds);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index b8f56e62158e..313e51e7d4f7 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -243,8 +243,9 @@ static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
* only, __init_mocs_table() take care to program unused index with
* this entry.
*/
- MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
- L3_3_WB),
+ MOCS_ENTRY(I915_MOCS_PTE,
+ LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
+ L3_1_UC),
GEN11_MOCS_ENTRIES,
/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index ab675d35030d..d7b8e4457fc2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -56,9 +56,12 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
static void gen11_rc6_enable(struct intel_rc6 *rc6)
{
- struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_gt *gt = rc6_to_gt(rc6);
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ u32 pg_enable;
+ int i;
/* 2b: Program RC6 thresholds.*/
set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
@@ -102,10 +105,19 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
- set(uncore, GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE);
+ pg_enable =
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+
+ if (INTEL_GEN(gt->i915) >= 12) {
+ for (i = 0; i < I915_MAX_VCS; i++)
+ if (HAS_ENGINE(gt, _VCS(i)))
+ pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
+ VDN_MFX_POWERGATE_ENABLE(i));
+ }
+
+ set(uncore, GEN9_PG_ENABLE, pg_enable);
}
static void gen9_rc6_enable(struct intel_rc6 *rc6)
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 6c580d0d9ea8..4a3bde7c9f21 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -131,8 +131,10 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
return;
}
- if (wal->list)
+ if (wal->list) {
memcpy(list, wal->list, sizeof(*wa) * wal->count);
+ kfree(wal->list);
+ }
wal->list = list;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 7ba16ddfe75f..d7898e87791f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -164,7 +164,7 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
/* let the virtual display supports DP1.2 */
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
- 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ 0x12, 0x014, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index ad8a9df49f29..778eb8cab610 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -829,8 +829,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
- if (!try_module_get(THIS_MODULE))
+ if (!try_module_get(THIS_MODULE)) {
+ ret = -ENODEV;
goto undo_group;
+ }
ret = kvmgt_guest_init(mdev);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index f6d7e33c7099..399582aeeefb 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -439,7 +439,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (IS_BROADWELL(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
- else
+ /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
+ else if (!IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d805d4da6181..664f3bf9af03 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8971,10 +8971,6 @@ enum {
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
-#define POWERGATE_ENABLE _MMIO(0xa210)
-#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3)
-#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4)
-
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
@@ -9114,9 +9110,11 @@ enum {
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
-#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
-#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
+#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
+#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
+#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
+#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n))
+#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n))
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 34e0d22d456b..cfb806767fc5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7118,23 +7118,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 vd_pg_enable = 0;
- unsigned int i;
-
/* Wa_1409120013:tgl */
I915_WRITE(ILK_DPFC_CHICKEN,
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
- /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
- for (i = 0; i < I915_MAX_VCS; i++) {
- if (HAS_ENGINE(&dev_priv->gt, _VCS(i)))
- vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
- VDN_MFX_POWERGATE_ENABLE(i);
- }
-
- I915_WRITE(POWERGATE_ENABLE,
- I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
-
/* Wa_1409825376:tgl (pre-prod)*/
if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 64bbb8288249..e424a6d1a68c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -2293,8 +2293,10 @@ static int perf_request_latency(void *arg)
struct intel_context *ce;
ce = intel_context_create(engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {
@@ -2467,8 +2469,10 @@ static int perf_series_engines(void *arg)
struct intel_context *ce;
ce = intel_context_create(engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 77497b45f9a2..55960cbb1019 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -814,9 +814,15 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
*
* XXX(hch): this has no business in a driver and needs to move
* to the device tree.
+ *
+ * If we have two subsequent calls to dma_direct_set_offset
+ * returns -EINVAL. Unfortunately, this happens when we have two
+ * backends in the system, and will result in the driver
+ * reporting an error while it has been setup properly before.
+ * Ignore EINVAL, but it should really be removed eventually.
*/
ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
+ if (ret && ret != -EINVAL)
return ret;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index d4c08043dd81..92add2cef2e7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -208,6 +208,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
phy_node = of_parse_phandle(dev->of_node, "phys", 0);
if (!phy_node) {
dev_err(dev, "Can't found PHY phandle\n");
+ ret = -EINVAL;
goto err_disable_clk_tmds;
}
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index a50ba4a4a1d7..b88f889b3932 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -23,19 +23,17 @@
#define CP_2WHEEL_MOUSE_HACK 0x02
#define CP_2WHEEL_MOUSE_HACK_ON 0x04
+#define VA_INVAL_LOGICAL_BOUNDARY 0x08
+
/*
* Some USB barcode readers from cypress have usage min and usage max in
* the wrong order
*/
-static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
unsigned int i;
- if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
- return rdesc;
-
if (*rsize < 4)
return rdesc;
@@ -48,6 +46,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ /*
+ * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
+ * reports Logical Minimum of its Consumer Control device as 572
+ * (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
+ */
+ if (*rsize == 25 &&
+ rdesc[0] == 0x05 && rdesc[1] == 0x0c &&
+ rdesc[2] == 0x09 && rdesc[3] == 0x01 &&
+ rdesc[6] == 0x19 && rdesc[7] == 0x00 &&
+ rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
+ hid_info(hdev,
+ "fixing up varmilo VA104M consumer control report descriptor\n");
+ rdesc[12] = 0x00;
+ rdesc[13] = 0x00;
+ }
+ return rdesc;
+}
+
+static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
+ if (quirks & CP_RDESC_SWAPPED_MIN_MAX)
+ rdesc = cp_rdesc_fixup(hdev, rdesc, rsize);
+ if (quirks & VA_INVAL_LOGICAL_BOUNDARY)
+ rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize);
+
+ return rdesc;
+}
+
static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -128,6 +160,8 @@ static const struct hid_device_id cp_devices[] = {
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1),
+ .driver_data = VA_INVAL_LOGICAL_BOUNDARY },
{ }
};
MODULE_DEVICE_TABLE(hid, cp_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d69842f79fc6..f170feaac40b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -331,6 +331,8 @@
#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
+#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1 0X07b1
+
#define USB_VENDOR_ID_DATA_MODUL 0x7374
#define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201
@@ -443,6 +445,10 @@
#define USB_VENDOR_ID_FRUCTEL 0x25B6
#define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002
+#define USB_VENDOR_ID_GAMEVICE 0x27F8
+#define USB_DEVICE_ID_GAMEVICE_GV186 0x0BBE
+#define USB_DEVICE_ID_GAMEVICE_KISHI 0x0BBF
+
#define USB_VENDOR_ID_GAMERON 0x0810
#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
@@ -485,6 +491,7 @@
#define USB_DEVICE_ID_PENPOWER 0x00f4
#define USB_VENDOR_ID_GREENASIA 0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR 0x3010
#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
@@ -743,6 +750,7 @@
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
+#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
#define USB_DEVICE_ID_LOGITECH_C007 0xc007
#define USB_DEVICE_ID_LOGITECH_C077 0xc077
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
@@ -1298,6 +1306,7 @@
#define USB_VENDOR_ID_UGTIZER 0x2179
#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
+#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
#define USB_VENDOR_ID_VIEWSONIC 0x0543
#define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 9770db624bfa..4dca11392459 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -319,6 +319,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 044a93f3c117..742c052b0110 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -11,6 +11,48 @@
#include "hid-ids.h"
+#define QUIRK_TOUCHPAD_ON_OFF_REPORT BIT(0)
+
+static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
+ if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
+ if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
+ hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
+ rdesc[163] = HID_MAIN_ITEM_RELATIVE;
+ }
+ }
+
+ return rdesc;
+}
+
+static int ite_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
+ if ((quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) &&
+ (usage->hid & HID_USAGE_PAGE) == 0x00880000) {
+ if (usage->hid == 0x00880078) {
+ /* Touchpad on, userspace expects F22 for this */
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F22);
+ return 1;
+ }
+ if (usage->hid == 0x00880079) {
+ /* Touchpad off, userspace expects F23 for this */
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F23);
+ return 1;
+ }
+ return -1;
+ }
+
+ return 0;
+}
+
static int ite_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -37,13 +79,27 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static int ite_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ hid_set_drvdata(hdev, (void *)id->driver_data);
+
+ ret = hid_open_report(hdev);
+ if (ret)
+ return ret;
+
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
- USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
+ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012),
+ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
@@ -55,6 +111,9 @@ MODULE_DEVICE_TABLE(hid, ite_devices);
static struct hid_driver ite_driver = {
.name = "itetech",
.id_table = ite_devices,
+ .probe = ite_probe,
+ .report_fixup = ite_report_fixup,
+ .input_mapping = ite_input_mapping,
.event = ite_event,
};
module_hid_driver(ite_driver);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 72fb6e54a50a..1ffcfc9a1e03 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -328,7 +328,7 @@ static const char mse_bluetooth_descriptor[] = {
0x25, 0x01, /* LOGICAL_MAX (1) */
0x75, 0x01, /* REPORT_SIZE (1) */
0x95, 0x04, /* REPORT_COUNT (4) */
- 0x81, 0x06, /* INPUT */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs) */
0xC0, /* END_COLLECTION */
0xC0, /* END_COLLECTION */
};
@@ -866,11 +866,24 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
schedule_work(&djrcv_dev->work);
}
+/*
+ * Some quad/bluetooth keyboards have a builtin touchpad in this case we see
+ * only 1 paired device with a device_type of REPORT_TYPE_KEYBOARD. For the
+ * touchpad to work we must also forward mouse input reports to the dj_hiddev
+ * created for the keyboard (instead of forwarding them to a second paired
+ * device with a device_type of REPORT_TYPE_MOUSE as we normally would).
+ */
+static const u16 kbd_builtin_touchpad_ids[] = {
+ 0xb309, /* Dinovo Edge */
+ 0xb30c, /* Dinovo Mini */
+};
+
static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
struct hidpp_event *hidpp_report,
struct dj_workitem *workitem)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ int i, id;
workitem->type = WORKITEM_TYPE_PAIRED;
workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
@@ -882,6 +895,13 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
POWER_KEYS | MEDIA_CENTER |
HIDPP;
+ id = (workitem->quad_id_msb << 8) | workitem->quad_id_lsb;
+ for (i = 0; i < ARRAY_SIZE(kbd_builtin_touchpad_ids); i++) {
+ if (id == kbd_builtin_touchpad_ids[i]) {
+ workitem->reports_supported |= STD_MOUSE;
+ break;
+ }
+ }
break;
case REPORT_TYPE_MOUSE:
workitem->reports_supported |= STD_MOUSE | HIDPP;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index b8b53dc95e86..0ca723119547 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -93,6 +93,8 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3)
#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
+#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
/*
* There are two hidpp protocols in use, the first version hidpp10 is known
* as register access protocol or RAP, the second version hidpp20 is known as
@@ -2951,6 +2953,26 @@ static int g920_get_config(struct hidpp_device *hidpp,
}
/* -------------------------------------------------------------------------- */
+/* Logitech Dinovo Mini keyboard with builtin touchpad */
+/* -------------------------------------------------------------------------- */
+#define DINOVO_MINI_PRODUCT_ID 0xb30c
+
+static int lg_dinovo_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
+ return 0;
+
+ switch (usage->hid & HID_USAGE) {
+ case 0x00d: lg_map_key_clear(KEY_MEDIA); break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+/* -------------------------------------------------------------------------- */
/* HID++1.0 devices which use HID++ reports for their wheels */
/* -------------------------------------------------------------------------- */
static int hidpp10_wheel_connect(struct hidpp_device *hidpp)
@@ -3185,6 +3207,9 @@ static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->application != HID_GD_MOUSE)
return m560_input_mapping(hdev, hi, field, usage, bit, max);
+ if (hdev->product == DINOVO_MINI_PRODUCT_ID)
+ return lg_dinovo_input_mapping(hdev, hi, field, usage, bit, max);
+
return 0;
}
@@ -3947,6 +3972,7 @@ static const struct hid_device_id hidpp_devices[] = {
LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech MX Anywhere 2 */
LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4072), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
@@ -3971,6 +3997,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */
+ LDJ_DEVICE(0xb309),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
@@ -4013,6 +4042,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5000 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* Dinovo Edge keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 0d27ccb55dd9..4211b9839209 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -49,6 +49,36 @@ enum {
MCP2221_ALT_F_NOT_GPIOD = 0xEF,
};
+/* MCP GPIO direction encoding */
+enum {
+ MCP2221_DIR_OUT = 0x00,
+ MCP2221_DIR_IN = 0x01,
+};
+
+#define MCP_NGPIO 4
+
+/* MCP GPIO set command layout */
+struct mcp_set_gpio {
+ u8 cmd;
+ u8 dummy;
+ struct {
+ u8 change_value;
+ u8 value;
+ u8 change_direction;
+ u8 direction;
+ } gpio[MCP_NGPIO];
+} __packed;
+
+/* MCP GPIO get command layout */
+struct mcp_get_gpio {
+ u8 cmd;
+ u8 dummy;
+ struct {
+ u8 direction;
+ u8 value;
+ } gpio[MCP_NGPIO];
+} __packed;
+
/*
* There is no way to distinguish responses. Therefore next command
* is sent only after response to previous has been received. Mutex
@@ -542,7 +572,7 @@ static int mcp_gpio_get(struct gpio_chip *gc,
mcp->txbuf[0] = MCP2221_GPIO_GET;
- mcp->gp_idx = (offset + 1) * 2;
+ mcp->gp_idx = offsetof(struct mcp_get_gpio, gpio[offset].value);
mutex_lock(&mcp->lock);
ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
@@ -559,7 +589,7 @@ static void mcp_gpio_set(struct gpio_chip *gc,
memset(mcp->txbuf, 0, 18);
mcp->txbuf[0] = MCP2221_GPIO_SET;
- mcp->gp_idx = ((offset + 1) * 4) - 1;
+ mcp->gp_idx = offsetof(struct mcp_set_gpio, gpio[offset].value);
mcp->txbuf[mcp->gp_idx - 1] = 1;
mcp->txbuf[mcp->gp_idx] = !!value;
@@ -575,7 +605,7 @@ static int mcp_gpio_dir_set(struct mcp2221 *mcp,
memset(mcp->txbuf, 0, 18);
mcp->txbuf[0] = MCP2221_GPIO_SET;
- mcp->gp_idx = (offset + 1) * 5;
+ mcp->gp_idx = offsetof(struct mcp_set_gpio, gpio[offset].direction);
mcp->txbuf[mcp->gp_idx - 1] = 1;
mcp->txbuf[mcp->gp_idx] = val;
@@ -590,7 +620,7 @@ static int mcp_gpio_direction_input(struct gpio_chip *gc,
struct mcp2221 *mcp = gpiochip_get_data(gc);
mutex_lock(&mcp->lock);
- ret = mcp_gpio_dir_set(mcp, offset, 0);
+ ret = mcp_gpio_dir_set(mcp, offset, MCP2221_DIR_IN);
mutex_unlock(&mcp->lock);
return ret;
@@ -603,7 +633,7 @@ static int mcp_gpio_direction_output(struct gpio_chip *gc,
struct mcp2221 *mcp = gpiochip_get_data(gc);
mutex_lock(&mcp->lock);
- ret = mcp_gpio_dir_set(mcp, offset, 1);
+ ret = mcp_gpio_dir_set(mcp, offset, MCP2221_DIR_OUT);
mutex_unlock(&mcp->lock);
/* Can't configure as output, bailout early */
@@ -623,7 +653,7 @@ static int mcp_gpio_get_direction(struct gpio_chip *gc,
mcp->txbuf[0] = MCP2221_GPIO_GET;
- mcp->gp_idx = (offset + 1) * 2;
+ mcp->gp_idx = offsetof(struct mcp_get_gpio, gpio[offset].direction);
mutex_lock(&mcp->lock);
ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
@@ -632,7 +662,7 @@ static int mcp_gpio_get_direction(struct gpio_chip *gc,
if (ret)
return ret;
- if (mcp->gpio_dir)
+ if (mcp->gpio_dir == MCP2221_DIR_IN)
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -758,7 +788,7 @@ static int mcp2221_raw_event(struct hid_device *hdev,
mcp->status = -ENOENT;
} else {
mcp->status = !!data[mcp->gp_idx];
- mcp->gpio_dir = !!data[mcp->gp_idx + 1];
+ mcp->gpio_dir = data[mcp->gp_idx + 1];
}
break;
default:
@@ -860,7 +890,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->get_direction = mcp_gpio_get_direction;
mcp->gc->set = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
- mcp->gc->ngpio = 4;
+ mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
mcp->gc->can_sleep = 1;
mcp->gc->parent = &hdev->dev;
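The offsetof() conversions in the hunk above can be checked outside the kernel. The following is a standalone sketch, not part of the patch: it mirrors the two report layouts with uint8_t standing in for u8 and __attribute__((packed)) for __packed, and prints the byte index of each field so the values can be compared against the hand-computed formulas the patch removes. Like the driver, it relies on GCC accepting a variable array index inside offsetof().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MCP_NGPIO 4

/* Mirrors of the report layouts shown in the hunk above. */
struct mcp_set_gpio {
	uint8_t cmd;
	uint8_t dummy;
	struct {
		uint8_t change_value;
		uint8_t value;
		uint8_t change_direction;
		uint8_t direction;
	} gpio[MCP_NGPIO];
} __attribute__((packed));

struct mcp_get_gpio {
	uint8_t cmd;
	uint8_t dummy;
	struct {
		uint8_t direction;
		uint8_t value;
	} gpio[MCP_NGPIO];
} __attribute__((packed));

int main(void)
{
	/* Print the byte index of each field for every GPIO line. */
	for (size_t i = 0; i < MCP_NGPIO; i++)
		printf("gpio%zu: set.value=%zu set.direction=%zu get.value=%zu get.direction=%zu\n",
		       i,
		       offsetof(struct mcp_set_gpio, gpio[i].value),
		       offsetof(struct mcp_set_gpio, gpio[i].direction),
		       offsetof(struct mcp_get_gpio, gpio[i].value),
		       offsetof(struct mcp_get_gpio, gpio[i].direction));
	return 0;
}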
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 7a2be0205dfd..bf7ecab5d9e5 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -83,7 +83,12 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186),
+ HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI),
+ HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 94c7398b5c27..3dd7d3246737 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -483,7 +483,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
return 1;
ptr = raw_data;
- ptr++; /* Skip report id */
+ if (report->id)
+ ptr++; /* Skip report id */
spin_lock_irqsave(&pdata->lock, flags);
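The report->id check added above follows the usual HID convention: numbered reports arrive with their report ID as the first byte of the raw buffer, while a report ID of 0 means the device uses unnumbered reports and no ID byte is prepended. A minimal sketch of that rule (hypothetical helper name, not part of the driver):

#include <stdint.h>

/* Hypothetical helper, for illustration only: where the payload starts
 * in a raw HID buffer for a given report ID. */
static inline const uint8_t *hid_payload_start(const uint8_t *raw,
						unsigned int report_id)
{
	/* Numbered reports (id != 0) carry the ID byte first;
	 * unnumbered reports (id == 0) start with the payload directly. */
	return report_id ? raw + 1 : raw;
}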
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 86b568037cb8..8e9c9e646cb7 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -385,6 +385,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
+ USB_DEVICE_ID_UGTIZER_TABLET_GT5040) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_TABLET_G5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7d20d1fcf8d2..d26d8cd98efc 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -997,6 +997,8 @@ int uclogic_params_init(struct uclogic_params *params,
break;
case VID_PID(USB_VENDOR_ID_UGTIZER,
USB_DEVICE_ID_UGTIZER_TABLET_GP0610):
+ case VID_PID(USB_VENDOR_ID_UGTIZER,
+ USB_DEVICE_ID_UGTIZER_TABLET_GT5040):
case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540):
case VID_PID(USB_VENDOR_ID_UGEE,
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 786e3e9af1c9..aeff1ffb0c8b 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -943,6 +943,11 @@ static void i2c_hid_acpi_enable_wakeup(struct device *dev)
}
}
+static void i2c_hid_acpi_shutdown(struct device *dev)
+{
+ acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D3_COLD);
+}
+
static const struct acpi_device_id i2c_hid_acpi_match[] = {
{"ACPI0C50", 0 },
{"PNP0C50", 0 },
@@ -959,6 +964,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
static inline void i2c_hid_acpi_enable_wakeup(struct device *dev) {}
+
+static inline void i2c_hid_acpi_shutdown(struct device *dev) {}
#endif
#ifdef CONFIG_OF
@@ -1175,6 +1182,8 @@ static void i2c_hid_shutdown(struct i2c_client *client)
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
free_irq(client->irq, ihid);
+
+ i2c_hid_acpi_shutdown(&client->dev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 0cde10fe0e71..f202ac7f4b3d 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -244,9 +244,13 @@ int hv_synic_cleanup(unsigned int cpu)
/*
* Hyper-V does not provide a way to change the connect CPU once
- * it is set; we must prevent the connect CPU from going offline.
+ * it is set; we must prevent the connect CPU from going offline
+ * while the VM is running normally. But in the panic or kexec()
+ * path where the vmbus is already disconnected, the CPU must be
+ * allowed to shut down.
*/
- if (cpu == VMBUS_CONNECT_CPU)
+ if (cpu == VMBUS_CONNECT_CPU &&
+ vmbus_connection.conn_state == CONNECTED)
return -EBUSY;
/*
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index beb38d9d607d..560a3373ff20 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -126,6 +126,12 @@ enum kx_chipset {
KX_MAX_CHIPS /* this must be last */
};
+enum kx_acpi_type {
+ ACPI_GENERIC,
+ ACPI_SMO8500,
+ ACPI_KIOX010A,
+};
+
struct kxcjk1013_data {
struct i2c_client *client;
struct iio_trigger *dready_trig;
@@ -143,7 +149,7 @@ struct kxcjk1013_data {
bool motion_trigger_on;
int64_t timestamp;
enum kx_chipset chipset;
- bool is_smo8500_device;
+ enum kx_acpi_type acpi_type;
};
enum kxcjk1013_axis {
@@ -270,6 +276,32 @@ static const struct {
{19163, 1, 0},
{38326, 0, 1} };
+#ifdef CONFIG_ACPI
+enum kiox010a_fn_index {
+ KIOX010A_SET_LAPTOP_MODE = 1,
+ KIOX010A_SET_TABLET_MODE = 2,
+};
+
+static int kiox010a_dsm(struct device *dev, int fn_index)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ guid_t kiox010a_dsm_guid;
+ union acpi_object *obj;
+
+ if (!handle)
+ return -ENODEV;
+
+ guid_parse("1f339696-d475-4e26-8cad-2e9f8e6d7a91", &kiox010a_dsm_guid);
+
+ obj = acpi_evaluate_dsm(handle, &kiox010a_dsm_guid, 1, fn_index, NULL);
+ if (!obj)
+ return -EIO;
+
+ ACPI_FREE(obj);
+ return 0;
+}
+#endif
+
static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
enum kxcjk1013_mode mode)
{
@@ -347,6 +379,13 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
{
int ret;
+#ifdef CONFIG_ACPI
+ if (data->acpi_type == ACPI_KIOX010A) {
+ /* Make sure the keyboard and touchpad on 2-in-1s using two KXCJ91008 accelerometers work */
+ kiox010a_dsm(&data->client->dev, KIOX010A_SET_LAPTOP_MODE);
+ }
+#endif
+
ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_WHO_AM_I);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading who_am_i\n");
@@ -1247,7 +1286,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
static const char *kxcjk1013_match_acpi_device(struct device *dev,
enum kx_chipset *chipset,
- bool *is_smo8500_device)
+ enum kx_acpi_type *acpi_type)
{
const struct acpi_device_id *id;
@@ -1256,7 +1295,9 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev,
return NULL;
if (strcmp(id->id, "SMO8500") == 0)
- *is_smo8500_device = true;
+ *acpi_type = ACPI_SMO8500;
+ else if (strcmp(id->id, "KIOX010A") == 0)
+ *acpi_type = ACPI_KIOX010A;
*chipset = (enum kx_chipset)id->driver_data;
@@ -1299,7 +1340,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
} else if (ACPI_HANDLE(&client->dev)) {
name = kxcjk1013_match_acpi_device(&client->dev,
&data->chipset,
- &data->is_smo8500_device);
+ &data->acpi_type);
} else
return -ENODEV;
@@ -1316,7 +1357,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &kxcjk1013_info;
- if (client->irq > 0 && !data->is_smo8500_device) {
+ if (client->irq > 0 && data->acpi_type != ACPI_SMO8500) {
ret = devm_request_threaded_irq(&client->dev, client->irq,
kxcjk1013_data_rdy_trig_poll,
kxcjk1013_event_handler,
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index 92b25083e23f..1aafbe2cfe67 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -71,7 +71,7 @@
#define JZ4725B_ADC_BATTERY_HIGH_VREF_BITS 10
#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
-#define JZ4770_ADC_BATTERY_VREF 6600
+#define JZ4770_ADC_BATTERY_VREF 1200
#define JZ4770_ADC_BATTERY_VREF_BITS 12
#define JZ_ADC_IRQ_AUX BIT(0)
@@ -177,13 +177,12 @@ static void ingenic_adc_set_config(struct ingenic_adc *adc,
mutex_unlock(&adc->lock);
}
-static void ingenic_adc_enable(struct ingenic_adc *adc,
- int engine,
- bool enabled)
+static void ingenic_adc_enable_unlocked(struct ingenic_adc *adc,
+ int engine,
+ bool enabled)
{
u8 val;
- mutex_lock(&adc->lock);
val = readb(adc->base + JZ_ADC_REG_ENABLE);
if (enabled)
@@ -192,20 +191,41 @@ static void ingenic_adc_enable(struct ingenic_adc *adc,
val &= ~BIT(engine);
writeb(val, adc->base + JZ_ADC_REG_ENABLE);
+}
+
+static void ingenic_adc_enable(struct ingenic_adc *adc,
+ int engine,
+ bool enabled)
+{
+ mutex_lock(&adc->lock);
+ ingenic_adc_enable_unlocked(adc, engine, enabled);
mutex_unlock(&adc->lock);
}
static int ingenic_adc_capture(struct ingenic_adc *adc,
int engine)
{
+ u32 cfg;
u8 val;
int ret;
- ingenic_adc_enable(adc, engine, true);
+ /*
+ * Disable CMD_SEL temporarily, because it causes wrong VBAT readings,
+ * probably due to the switch of VREF. We must keep the lock here to
+ * avoid races with the buffer enable/disable functions.
+ */
+ mutex_lock(&adc->lock);
+ cfg = readl(adc->base + JZ_ADC_REG_CFG);
+ writel(cfg & ~JZ_ADC_REG_CFG_CMD_SEL, adc->base + JZ_ADC_REG_CFG);
+
+ ingenic_adc_enable_unlocked(adc, engine, true);
ret = readb_poll_timeout(adc->base + JZ_ADC_REG_ENABLE, val,
!(val & BIT(engine)), 250, 1000);
if (ret)
- ingenic_adc_enable(adc, engine, false);
+ ingenic_adc_enable_unlocked(adc, engine, false);
+
+ writel(cfg, adc->base + JZ_ADC_REG_CFG);
+ mutex_unlock(&adc->lock);
return ret;
}
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index ac415cb089cd..79c1dd68b909 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -9,9 +9,9 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/iio/iio.h>
@@ -276,6 +276,8 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
goto err_disable_clk;
}
+ adc_dev->dev_comp = device_get_match_data(&pdev->dev);
+
mutex_init(&adc_dev->lock);
mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index cd870c089182..a83199b212a4 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -41,18 +41,16 @@
* struct stm32_adc_common_regs - stm32 common registers
* @csr: common status register offset
* @ccr: common control register offset
- * @eoc1_msk: adc1 end of conversion flag in @csr
- * @eoc2_msk: adc2 end of conversion flag in @csr
- * @eoc3_msk: adc3 end of conversion flag in @csr
+ * @eoc_msk: array of eoc (end of conversion flag) masks in csr for adc1..n
+ * @ovr_msk: array of ovr (overrun flag) masks in csr for adc1..n
* @ier: interrupt enable register offset for each adc
* @eocie_msk: end of conversion interrupt enable mask in @ier
*/
struct stm32_adc_common_regs {
u32 csr;
u32 ccr;
- u32 eoc1_msk;
- u32 eoc2_msk;
- u32 eoc3_msk;
+ u32 eoc_msk[STM32_ADC_MAX_ADCS];
+ u32 ovr_msk[STM32_ADC_MAX_ADCS];
u32 ier;
u32 eocie_msk;
};
@@ -282,21 +280,20 @@ out:
static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
.csr = STM32F4_ADC_CSR,
.ccr = STM32F4_ADC_CCR,
- .eoc1_msk = STM32F4_EOC1 | STM32F4_OVR1,
- .eoc2_msk = STM32F4_EOC2 | STM32F4_OVR2,
- .eoc3_msk = STM32F4_EOC3 | STM32F4_OVR3,
+ .eoc_msk = { STM32F4_EOC1, STM32F4_EOC2, STM32F4_EOC3},
+ .ovr_msk = { STM32F4_OVR1, STM32F4_OVR2, STM32F4_OVR3},
.ier = STM32F4_ADC_CR1,
- .eocie_msk = STM32F4_EOCIE | STM32F4_OVRIE,
+ .eocie_msk = STM32F4_EOCIE,
};
/* STM32H7 common registers definitions */
static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
.csr = STM32H7_ADC_CSR,
.ccr = STM32H7_ADC_CCR,
- .eoc1_msk = STM32H7_EOC_MST | STM32H7_OVR_MST,
- .eoc2_msk = STM32H7_EOC_SLV | STM32H7_OVR_SLV,
+ .eoc_msk = { STM32H7_EOC_MST, STM32H7_EOC_SLV},
+ .ovr_msk = { STM32H7_OVR_MST, STM32H7_OVR_SLV},
.ier = STM32H7_ADC_IER,
- .eocie_msk = STM32H7_EOCIE | STM32H7_OVRIE,
+ .eocie_msk = STM32H7_EOCIE,
};
static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
@@ -318,6 +315,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
{
struct stm32_adc_priv *priv = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ int i;
u32 status;
chained_irq_enter(chip, desc);
@@ -335,17 +333,12 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
* before invoking the interrupt handler (e.g. call ISR only for
* IRQ-enabled ADCs).
*/
- if (status & priv->cfg->regs->eoc1_msk &&
- stm32_adc_eoc_enabled(priv, 0))
- generic_handle_irq(irq_find_mapping(priv->domain, 0));
-
- if (status & priv->cfg->regs->eoc2_msk &&
- stm32_adc_eoc_enabled(priv, 1))
- generic_handle_irq(irq_find_mapping(priv->domain, 1));
-
- if (status & priv->cfg->regs->eoc3_msk &&
- stm32_adc_eoc_enabled(priv, 2))
- generic_handle_irq(irq_find_mapping(priv->domain, 2));
+ for (i = 0; i < priv->cfg->num_irqs; i++) {
+ if ((status & priv->cfg->regs->eoc_msk[i] &&
+ stm32_adc_eoc_enabled(priv, i)) ||
+ (status & priv->cfg->regs->ovr_msk[i]))
+ generic_handle_irq(irq_find_mapping(priv->domain, i));
+ }
chained_irq_exit(chip, desc);
};
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index b3f31f147347..16c02c30dec7 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -154,6 +154,7 @@ struct stm32_adc;
* @start_conv: routine to start conversions
* @stop_conv: routine to stop conversions
* @unprepare: optional unprepare routine (disable, power-down)
+ * @irq_clear: routine to clear irqs
* @smp_cycles: programmable sampling time (ADC clock cycles)
*/
struct stm32_adc_cfg {
@@ -166,6 +167,7 @@ struct stm32_adc_cfg {
void (*start_conv)(struct iio_dev *, bool dma);
void (*stop_conv)(struct iio_dev *);
void (*unprepare)(struct iio_dev *);
+ void (*irq_clear)(struct iio_dev *indio_dev, u32 msk);
const unsigned int *smp_cycles;
};
@@ -621,6 +623,13 @@ static void stm32f4_adc_stop_conv(struct iio_dev *indio_dev)
STM32F4_ADON | STM32F4_DMA | STM32F4_DDS);
}
+static void stm32f4_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
+ stm32_adc_clr_bits(adc, adc->cfg->regs->isr_eoc.reg, msk);
+}
+
static void stm32h7_adc_start_conv(struct iio_dev *indio_dev, bool dma)
{
struct stm32_adc *adc = iio_priv(indio_dev);
@@ -659,6 +668,13 @@ static void stm32h7_adc_stop_conv(struct iio_dev *indio_dev)
stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR, STM32H7_DMNGT_MASK);
}
+static void stm32h7_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ /* On STM32H7, IRQs are cleared by writing 1 into the ISR register */
+ stm32_adc_set_bits(adc, adc->cfg->regs->isr_eoc.reg, msk);
+}
+
static int stm32h7_adc_exit_pwr_down(struct iio_dev *indio_dev)
{
struct stm32_adc *adc = iio_priv(indio_dev);
@@ -1235,17 +1251,40 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
}
}
+static void stm32_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
+ adc->cfg->irq_clear(indio_dev, msk);
+}
+
static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
{
struct iio_dev *indio_dev = data;
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+ u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
- if (status & regs->isr_ovr.mask)
+ /* Check ovr status right now, as the ovr mask should already be disabled */
+ if (status & regs->isr_ovr.mask) {
+ /*
+ * Clear ovr bit to avoid subsequent calls to IRQ handler.
+ * This requires stopping the ADC first. The OVR bit state in ISR
+ * is propagated to the CSR register by hardware.
+ */
+ adc->cfg->stop_conv(indio_dev);
+ stm32_adc_irq_clear(indio_dev, regs->isr_ovr.mask);
dev_err(&indio_dev->dev, "Overrun, stopping: restart needed\n");
+ return IRQ_HANDLED;
+ }
- return IRQ_HANDLED;
+ if (!(status & mask))
+ dev_err_ratelimited(&indio_dev->dev,
+ "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
+ mask, status);
+
+ return IRQ_NONE;
}
static irqreturn_t stm32_adc_isr(int irq, void *data)
@@ -1254,6 +1293,10 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+ u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
+
+ if (!(status & mask))
+ return IRQ_WAKE_THREAD;
if (status & regs->isr_ovr.mask) {
/*
@@ -2046,6 +2089,7 @@ static const struct stm32_adc_cfg stm32f4_adc_cfg = {
.start_conv = stm32f4_adc_start_conv,
.stop_conv = stm32f4_adc_stop_conv,
.smp_cycles = stm32f4_adc_smp_cycles,
+ .irq_clear = stm32f4_adc_irq_clear,
};
static const struct stm32_adc_cfg stm32h7_adc_cfg = {
@@ -2057,6 +2101,7 @@ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
.prepare = stm32h7_adc_prepare,
.unprepare = stm32h7_adc_unprepare,
.smp_cycles = stm32h7_adc_smp_cycles,
+ .irq_clear = stm32h7_adc_irq_clear,
};
static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
@@ -2069,6 +2114,7 @@ static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
.prepare = stm32h7_adc_prepare,
.unprepare = stm32h7_adc_unprepare,
.smp_cycles = stm32h7_adc_smp_cycles,
+ .irq_clear = stm32h7_adc_irq_clear,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index c62cacc04672..e3f507771f17 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -256,7 +256,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
struct cros_ec_dev *ec = sensor_hub->ec;
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
- u32 ver_mask;
+ u32 ver_mask, temp;
int frequencies[ARRAY_SIZE(state->frequencies) / 2] = { 0 };
int ret, i;
@@ -311,10 +311,16 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
&frequencies[2],
&state->fifo_max_event_count);
} else {
- frequencies[1] = state->resp->info_3.min_frequency;
- frequencies[2] = state->resp->info_3.max_frequency;
- state->fifo_max_event_count =
- state->resp->info_3.fifo_max_event_count;
+ if (state->resp->info_3.max_frequency == 0) {
+ get_default_min_max_freq(state->resp->info.type,
+ &frequencies[1],
+ &frequencies[2],
+ &temp);
+ } else {
+ frequencies[1] = state->resp->info_3.min_frequency;
+ frequencies[2] = state->resp->info_3.max_frequency;
+ }
+ state->fifo_max_event_count = state->resp->info_3.fifo_max_event_count;
}
for (i = 0; i < ARRAY_SIZE(frequencies); i++) {
state->frequencies[2 * i] = frequencies[i] / 1000;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 8c8d8870ca07..99562ba85ee4 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -156,11 +156,13 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
{
struct st_lsm6dsx_sensor *sensor;
- u32 odr;
+ u32 odr, timeout;
sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 12500;
- msleep((2000000U / odr) + 1);
+ /* set 10ms as minimum timeout for i2c slave configuration */
+ timeout = max_t(u32, 2000000U / odr + 1, 10);
+ msleep(timeout);
}
/*
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index cade6dc0305b..33ad4dd0b5c7 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -544,6 +544,7 @@ config VCNL4000
config VCNL4035
tristate "VCNL4035 combined ALS and proximity sensor"
+ select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select REGMAP_I2C
depends on I2C
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 32a51432ec4f..9325e189a215 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -73,6 +73,9 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
This allows the user to configure the default GID type that the CM
uses for each device when initiating new connections.
+config INFINIBAND_VIRT_DMA
+ def_bool !HIGHMEM
+
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 5740d1ba3568..012156624b82 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -859,8 +859,8 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
atomic_set(&cm_id_priv->work_count, -1);
refcount_set(&cm_id_priv->refcount, 1);
- ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
- &cm.local_id_next, GFP_KERNEL);
+ ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
+ &cm.local_id_next, GFP_KERNEL);
if (ret < 0)
goto error;
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
@@ -878,8 +878,8 @@ error:
*/
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
- xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
- cm_id_priv, GFP_KERNEL);
+ xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+ cm_id_priv, GFP_ATOMIC);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
@@ -1169,7 +1169,7 @@ retest:
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
- xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
+ xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -4482,7 +4482,7 @@ static int __init ib_cm_init(void)
cm.remote_id_table = RB_ROOT;
cm.remote_qp_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
- xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 7eaf99538216..c87b94ea2939 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -15245,7 +15245,8 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
& CCE_REVISION_SW_MASK);
/* alloc netdev data */
- if (hfi1_netdev_alloc(dd))
+ ret = hfi1_netdev_alloc(dd);
+ if (ret)
goto bail_cleanup;
ret = set_up_context_variables(dd);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index fa2a3fa0c3e4..6895bac53990 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -266,7 +266,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
}
ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
if (ret)
- return ret;
+ goto err_srq_free;
spin_lock_init(&dev->srq_tbl_lock);
rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index 9ef5f5ce1ff6..c8e268082952 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
- depends on X86_64 && ARCH_DMA_ADDR_T_64BIT
+ depends on INFINIBAND_VIRT_DMA
+ depends on X86_64
depends on PCI
select DMA_VIRT_OPS
help
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index a0c6c7dfc181..8810bfa68049 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -2,7 +2,7 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
- depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
+ depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRYPTO_CRC32
select DMA_VIRT_OPS
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index b622fc62f2cd..3450ba5081df 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,7 @@
config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
depends on INET && INFINIBAND && LIBCRC32C
+ depends on INFINIBAND_VIRT_DMA
select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index 27126e621eb6..d450f11b98a7 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -99,7 +99,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio,
switch (data) {
case SUNKBD_RET_RESET:
- schedule_work(&sunkbd->tq);
+ if (sunkbd->enabled)
+ schedule_work(&sunkbd->tq);
sunkbd->reset = -1;
break;
@@ -200,16 +201,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
}
/*
- * sunkbd_reinit() sets leds and beeps to a state the computer remembers they
- * were in.
+ * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers
+ * they were in.
*/
-static void sunkbd_reinit(struct work_struct *work)
+static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd)
{
- struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
-
- wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
-
serio_write(sunkbd->serio, SUNKBD_CMD_SETLED);
serio_write(sunkbd->serio,
(!!test_bit(LED_CAPSL, sunkbd->dev->led) << 3) |
@@ -222,11 +219,39 @@ static void sunkbd_reinit(struct work_struct *work)
SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd));
}
+
+/*
+ * sunkbd_reinit() waits for the keyboard reset to complete and restores the
+ * state of the LEDs and beeps.
+ */
+
+static void sunkbd_reinit(struct work_struct *work)
+{
+ struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
+
+ /*
+ * It is OK that we check sunkbd->enabled without pausing serio,
+ * as we only want to catch the true->false transition, which will
+ * happen once, and we will be woken up for it.
+ */
+ wait_event_interruptible_timeout(sunkbd->wait,
+ sunkbd->reset >= 0 || !sunkbd->enabled,
+ HZ);
+
+ if (sunkbd->reset >= 0 && sunkbd->enabled)
+ sunkbd_set_leds_beeps(sunkbd);
+}
+
static void sunkbd_enable(struct sunkbd *sunkbd, bool enable)
{
serio_pause_rx(sunkbd->serio);
sunkbd->enabled = enable;
serio_continue_rx(sunkbd->serio);
+
+ if (!enable) {
+ wake_up_interruptible(&sunkbd->wait);
+ cancel_work_sync(&sunkbd->tq);
+ }
}
/*
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 5fe92d4ba3f0..4cc4e8ff42b3 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -696,7 +696,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
struct input_dev *input_dev;
const struct adxl34x_platform_data *pdata;
int err, range, i;
- unsigned char revid;
+ int revid;
if (!irq) {
dev_err(dev, "no IRQ?\n");
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index c75b00c45d75..36e3cd908671 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -78,7 +78,7 @@ struct elan_transport_ops {
int (*iap_reset)(struct i2c_client *client);
int (*prepare_fw_update)(struct i2c_client *client, u16 ic_type,
- u8 iap_version);
+ u8 iap_version, u16 fw_page_size);
int (*write_fw_block)(struct i2c_client *client, u16 fw_page_size,
const u8 *page, u16 checksum, int idx);
int (*finish_fw_update)(struct i2c_client *client,
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index c599e21a8478..61ed3f5ca219 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -497,7 +497,8 @@ static int __elan_update_firmware(struct elan_tp_data *data,
u16 sw_checksum = 0, fw_checksum = 0;
error = data->ops->prepare_fw_update(client, data->ic_type,
- data->iap_version);
+ data->iap_version,
+ data->fw_page_size);
if (error)
return error;
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 5a496d4ffa49..13dc097eb6c6 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -517,7 +517,7 @@ static int elan_i2c_set_flash_key(struct i2c_client *client)
return 0;
}
-static int elan_read_write_iap_type(struct i2c_client *client)
+static int elan_read_write_iap_type(struct i2c_client *client, u16 fw_page_size)
{
int error;
u16 constant;
@@ -526,7 +526,7 @@ static int elan_read_write_iap_type(struct i2c_client *client)
do {
error = elan_i2c_write_cmd(client, ETP_I2C_IAP_TYPE_CMD,
- ETP_I2C_IAP_TYPE_REG);
+ fw_page_size / 2);
if (error) {
dev_err(&client->dev,
"cannot write iap type: %d\n", error);
@@ -543,7 +543,7 @@ static int elan_read_write_iap_type(struct i2c_client *client)
constant = le16_to_cpup((__le16 *)val);
dev_dbg(&client->dev, "iap type reg: 0x%04x\n", constant);
- if (constant == ETP_I2C_IAP_TYPE_REG)
+ if (constant == fw_page_size / 2)
return 0;
} while (--retry > 0);
@@ -553,7 +553,7 @@ static int elan_read_write_iap_type(struct i2c_client *client)
}
static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type,
- u8 iap_version)
+ u8 iap_version, u16 fw_page_size)
{
struct device *dev = &client->dev;
int error;
@@ -594,7 +594,7 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type,
}
if (ic_type >= 0x0D && iap_version >= 1) {
- error = elan_read_write_iap_type(client);
+ error = elan_read_write_iap_type(client, fw_page_size);
if (error)
return error;
}
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 8ff823751f3b..1820f1cfc1dc 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -340,7 +340,7 @@ static int elan_smbus_set_flash_key(struct i2c_client *client)
}
static int elan_smbus_prepare_fw_update(struct i2c_client *client, u16 ic_type,
- u8 iap_version)
+ u8 iap_version, u16 fw_page_size)
{
struct device *dev = &client->dev;
int len;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d3eda48032e3..944cbb519c6d 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -122,6 +122,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]");
#endif
+static bool i8042_present;
static bool i8042_bypass_aux_irq_test;
static char i8042_kbd_firmware_id[128];
static char i8042_aux_firmware_id[128];
@@ -343,6 +344,9 @@ int i8042_command(unsigned char *param, int command)
unsigned long flags;
int retval;
+ if (!i8042_present)
+ return -1;
+
spin_lock_irqsave(&i8042_lock, flags);
retval = __i8042_command(param, command);
spin_unlock_irqrestore(&i8042_lock, flags);
@@ -1612,12 +1616,15 @@ static int __init i8042_init(void)
err = i8042_platform_init();
if (err)
- return err;
+ return (err == -ENODEV) ? 0 : err;
err = i8042_controller_check();
if (err)
goto err_platform_exit;
+ /* Set this before creating the dev to allow i8042_command to work right away */
+ i8042_present = true;
+
pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
@@ -1636,6 +1643,9 @@ static int __init i8042_init(void)
static void __exit i8042_exit(void)
{
+ if (!i8042_present)
+ return;
+
platform_device_unregister(i8042_platform_device);
platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index f012fe746df0..cc18f54ea887 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -96,6 +96,7 @@ config TOUCHSCREEN_AD7879_SPI
config TOUCHSCREEN_ADC
tristate "Generic ADC based resistive touchscreen"
depends on IIO
+ select IIO_BUFFER
select IIO_BUFFER_CB
help
Say Y here if you want to use the generic ADC
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b2e804473209..11319e4dce4a 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -335,7 +335,9 @@ static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
{
- dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&pdev->physfn->dev));
+ struct pci_dev *physfn = pci_physfn(pdev);
+
+ dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index c6622011d493..c71a9c279a33 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -179,7 +179,7 @@ static int rwbf_quirk;
* (used when kernel is launched w/ TXT)
*/
static int force_on = 0;
-int intel_iommu_tboot_noforce;
+static int intel_iommu_tboot_noforce;
static int no_platform_optin;
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
@@ -4884,7 +4884,8 @@ int __init intel_iommu_init(void)
* Intel IOMMU is required for a TXT/tboot launch or platform
* opt in, so enforce that.
*/
- force_on = tboot_force_iommu() || platform_optin_force_iommu();
+ force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
+ platform_optin_force_iommu();
if (iommu_init_mempool()) {
if (force_on)
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 829ccef87426..d25a4b50c2f3 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -30,7 +30,10 @@
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
#define SDHCI_ARASAN_ITAPDLY_REGISTER 0xF0F8
+#define SDHCI_ARASAN_ITAPDLY_SEL_MASK 0xFF
+
#define SDHCI_ARASAN_OTAPDLY_REGISTER 0xF0FC
+#define SDHCI_ARASAN_OTAPDLY_SEL_MASK 0x3F
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
@@ -600,14 +603,8 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
u8 tap_delay, tap_max = 0;
int ret;
- /*
- * This is applicable for SDHCI_SPEC_300 and above
- * ZynqMP does not set phase for <=25MHz clock.
- * If degrees is zero, no need to do anything.
- */
- if (host->version < SDHCI_SPEC_300 ||
- host->timing == MMC_TIMING_LEGACY ||
- host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ /* This is applicable for SDHCI_SPEC_300 and above */
+ if (host->version < SDHCI_SPEC_300)
return 0;
switch (host->timing) {
@@ -638,6 +635,9 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
if (ret)
pr_err("Error setting Output Tap Delay\n");
+ /* Release DLL Reset */
+ zynqmp_pm_sd_dll_reset(node_id, PM_DLL_RESET_RELEASE);
+
return ret;
}
@@ -668,16 +668,13 @@ static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
u8 tap_delay, tap_max = 0;
int ret;
- /*
- * This is applicable for SDHCI_SPEC_300 and above
- * ZynqMP does not set phase for <=25MHz clock.
- * If degrees is zero, no need to do anything.
- */
- if (host->version < SDHCI_SPEC_300 ||
- host->timing == MMC_TIMING_LEGACY ||
- host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ /* This is applicable for SDHCI_SPEC_300 and above */
+ if (host->version < SDHCI_SPEC_300)
return 0;
+ /* Assert DLL Reset */
+ zynqmp_pm_sd_dll_reset(node_id, PM_DLL_RESET_ASSERT);
+
switch (host->timing) {
case MMC_TIMING_MMC_HS:
case MMC_TIMING_SD_HS:
@@ -733,14 +730,8 @@ static int sdhci_versal_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
struct sdhci_host *host = sdhci_arasan->host;
u8 tap_delay, tap_max = 0;
- /*
- * This is applicable for SDHCI_SPEC_300 and above
- * Versal does not set phase for <=25MHz clock.
- * If degrees is zero, no need to do anything.
- */
- if (host->version < SDHCI_SPEC_300 ||
- host->timing == MMC_TIMING_LEGACY ||
- host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ /* This is applicable for SDHCI_SPEC_300 and above */
+ if (host->version < SDHCI_SPEC_300)
return 0;
switch (host->timing) {
@@ -773,6 +764,7 @@ static int sdhci_versal_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER);
regval |= SDHCI_OTAPDLY_ENABLE;
sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ regval &= ~SDHCI_ARASAN_OTAPDLY_SEL_MASK;
regval |= tap_delay;
sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
}
@@ -804,14 +796,8 @@ static int sdhci_versal_sampleclk_set_phase(struct clk_hw *hw, int degrees)
struct sdhci_host *host = sdhci_arasan->host;
u8 tap_delay, tap_max = 0;
- /*
- * This is applicable for SDHCI_SPEC_300 and above
- * Versal does not set phase for <=25MHz clock.
- * If degrees is zero, no need to do anything.
- */
- if (host->version < SDHCI_SPEC_300 ||
- host->timing == MMC_TIMING_LEGACY ||
- host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ /* This is applicable for SDHCI_SPEC_300 and above */
+ if (host->version < SDHCI_SPEC_300)
return 0;
switch (host->timing) {
@@ -846,6 +832,7 @@ static int sdhci_versal_sampleclk_set_phase(struct clk_hw *hw, int degrees)
sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
regval |= SDHCI_ITAPDLY_ENABLE;
sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval &= ~SDHCI_ARASAN_ITAPDLY_SEL_MASK;
regval |= tap_delay;
sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
regval &= ~SDHCI_ITAPDLY_CHGWIN;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 23da7f7fe093..9552708846ca 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -665,6 +665,15 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
}
}
+static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ /* Set UHS timing to SDR25 for High Speed mode */
+ if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
+ timing = MMC_TIMING_UHS_SDR25;
+ sdhci_set_uhs_signaling(host, timing);
+}
+
#define INTEL_HS400_ES_REG 0x78
#define INTEL_HS400_ES_BIT BIT(0)
@@ -721,7 +730,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
};
@@ -731,7 +740,7 @@ static const struct sdhci_ops sdhci_intel_glk_ops = {
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_cqhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
.irq = sdhci_cqhci_irq,
};
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 6dee4f8f2024..81e39d7507d8 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -592,7 +592,7 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
+ netif_rx_ni(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 881799bd9c5e..99e5f272205d 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -728,8 +728,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
int err;
err = pm_runtime_get_sync(priv->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_noidle(priv->dev);
return err;
+ }
err = __flexcan_get_berr_counter(dev, bec);
@@ -1565,14 +1567,10 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}
- err = flexcan_transceiver_enable(priv);
- if (err)
- goto out_chip_disable;
-
/* synchronize with the can bus */
err = flexcan_chip_unfreeze(priv);
if (err)
- goto out_transceiver_disable;
+ goto out_chip_disable;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1590,8 +1588,6 @@ static int flexcan_chip_start(struct net_device *dev)
return 0;
- out_transceiver_disable:
- flexcan_transceiver_disable(priv);
out_chip_disable:
flexcan_chip_disable(priv);
return err;
@@ -1621,7 +1617,6 @@ static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
&regs->ctrl);
- flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
return 0;
@@ -1654,17 +1649,23 @@ static int flexcan_open(struct net_device *dev)
}
err = pm_runtime_get_sync(priv->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_noidle(priv->dev);
return err;
+ }
err = open_candev(dev);
if (err)
goto out_runtime_put;
- err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
goto out_close;
+ err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_transceiver_disable;
+
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
else
@@ -1716,6 +1717,8 @@ static int flexcan_open(struct net_device *dev)
can_rx_offload_del(&priv->offload);
out_free_irq:
free_irq(dev->irq, dev);
+ out_transceiver_disable:
+ flexcan_transceiver_disable(priv);
out_close:
close_candev(dev);
out_runtime_put:
@@ -1734,6 +1737,7 @@ static int flexcan_close(struct net_device *dev)
can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
+ flexcan_transceiver_disable(priv);
close_candev(dev);
pm_runtime_put(priv->dev);
@@ -1852,7 +1856,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
return -EINVAL;
/* stop mode property format is:
- * <&gpr req_gpr>.
+ * <&gpr req_gpr req_bit>.
*/
ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
ARRAY_SIZE(out_val));
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 6f766918211a..72acd1ba162d 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -287,12 +287,12 @@ struct kvaser_pciefd_tx_packet {
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
.name = KVASER_PCIEFD_DRV_NAME,
.tseg1_min = 1,
- .tseg1_max = 255,
+ .tseg1_max = 512,
.tseg2_min = 1,
.tseg2_max = 32,
.sjw_max = 16,
.brp_min = 1,
- .brp_max = 4096,
+ .brp_max = 8192,
.brp_inc = 1,
};
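Raising brp_max and tseg1_max widens the range of nominal bitrates the core advertises. As a rough illustration (standalone sketch, not from the driver; the 80 MHz clock is an assumed example, not the device's actual CAN clock), the slowest reachable bitrate follows from the usual CAN bit-time relation fclk / (brp * (1 + tseg1 + tseg2)):

#include <stdio.h>

int main(void)
{
	unsigned long long fclk = 80000000ULL;	/* assumed example CAN clock */
	unsigned int brp_max = 8192, tseg1_max = 512, tseg2_max = 32;
	/* quanta per bit: one sync quantum plus tseg1 and tseg2 quanta */
	unsigned long long quanta = 1ULL + tseg1_max + tseg2_max;

	printf("slowest nominal bitrate ~ %llu bit/s\n",
	       fclk / (brp_max * quanta));
	return 0;
}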
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 48be627c85c2..5f9f8192dd0b 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -16,7 +16,8 @@ config CAN_M_CAN_PLATFORM
config CAN_M_CAN_TCAN4X5X
depends on CAN_M_CAN
- depends on REGMAP_SPI
+ depends on SPI
+ select REGMAP_SPI
tristate "TCAN4X5X M_CAN device"
help
Say Y here if you want support for Texas Instruments TCAN4x5x
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 02c5795b7393..f3fc37e96b08 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -665,7 +665,7 @@ static int m_can_handle_state_change(struct net_device *dev,
unsigned int ecr;
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
cdev->can.state = CAN_STATE_ERROR_WARNING;
@@ -694,7 +694,7 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -956,6 +956,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
struct net_device_stats *stats = &dev->stats;
u32 ir;
+ if (pm_runtime_suspended(cdev->dev))
+ return IRQ_NONE;
ir = m_can_read(cdev, M_CAN_IR);
if (!ir)
return IRQ_NONE;
@@ -1414,6 +1416,9 @@ static void m_can_stop(struct net_device *dev)
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
+ /* Set init mode to disengage from the network */
+ m_can_config_endisable(cdev, true);
+
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
}
@@ -1812,6 +1817,12 @@ out:
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
+void m_can_class_free_dev(struct net_device *net)
+{
+ free_candev(net);
+}
+EXPORT_SYMBOL_GPL(m_can_class_free_dev);
+
int m_can_class_register(struct m_can_classdev *m_can_dev)
{
int ret;
@@ -1850,7 +1861,6 @@ pm_runtime_fail:
if (ret) {
if (m_can_dev->pm_clock_support)
pm_runtime_disable(m_can_dev->dev);
- free_candev(m_can_dev->net);
}
return ret;
@@ -1908,8 +1918,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev)
unregister_candev(m_can_dev->net);
m_can_clk_stop(m_can_dev);
-
- free_candev(m_can_dev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 49f42b50627a..b2699a7c9997 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -99,6 +99,7 @@ struct m_can_classdev {
};
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev);
+void m_can_class_free_dev(struct net_device *net);
int m_can_class_register(struct m_can_classdev *cdev);
void m_can_class_unregister(struct m_can_classdev *cdev);
int m_can_class_get_clocks(struct m_can_classdev *cdev);
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index e6d0cb9ee02f..161cb9be018c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -67,32 +67,36 @@ static int m_can_plat_probe(struct platform_device *pdev)
return -ENOMEM;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ ret = -ENOMEM;
+ goto probe_fail;
+ }
mcan_class->device_data = priv;
- m_can_class_get_clocks(mcan_class);
+ ret = m_can_class_get_clocks(mcan_class);
+ if (ret)
+ goto probe_fail;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
addr = devm_ioremap_resource(&pdev->dev, res);
irq = platform_get_irq_byname(pdev, "int0");
if (IS_ERR(addr) || irq < 0) {
ret = -EINVAL;
- goto failed_ret;
+ goto probe_fail;
}
/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
if (!res) {
ret = -ENODEV;
- goto failed_ret;
+ goto probe_fail;
}
mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!mram_addr) {
ret = -ENOMEM;
- goto failed_ret;
+ goto probe_fail;
}
priv->base = addr;
@@ -111,9 +115,10 @@ static int m_can_plat_probe(struct platform_device *pdev)
m_can_init_ram(mcan_class);
- ret = m_can_class_register(mcan_class);
+ return m_can_class_register(mcan_class);
-failed_ret:
+probe_fail:
+ m_can_class_free_dev(mcan_class->net);
return ret;
}
@@ -134,6 +139,8 @@ static int m_can_plat_remove(struct platform_device *pdev)
m_can_class_unregister(mcan_class);
+ m_can_class_free_dev(mcan_class->net);
+
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index eacd428e07e9..e5d7d85e0b6d 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -440,14 +440,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
return -ENOMEM;
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_m_can_class_free_dev;
+ }
priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
- if (PTR_ERR(priv->power) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- else
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_m_can_class_free_dev;
+ } else {
priv->power = NULL;
+ }
mcan_class->device_data = priv;
@@ -460,8 +464,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
}
/* Sanity check */
- if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF)
- return -ERANGE;
+ if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) {
+ ret = -ERANGE;
+ goto out_m_can_class_free_dev;
+ }
priv->reg_offset = TCAN4X5X_MCAN_OFFSET;
priv->mram_start = TCAN4X5X_MRAM_START;
@@ -487,6 +493,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
&spi->dev, &tcan4x5x_regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ goto out_clk;
+ }
ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
@@ -514,8 +524,10 @@ out_clk:
clk_disable_unprepare(mcan_class->cclk);
clk_disable_unprepare(mcan_class->hclk);
}
-
+ out_m_can_class_free_dev:
+ m_can_class_free_dev(mcan_class->net);
dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
+
return ret;
}
@@ -523,9 +535,11 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ m_can_class_unregister(priv->mcan_dev);
+
tcan4x5x_power_enable(priv->power, 0);
- m_can_class_unregister(priv->mcan_dev);
+ m_can_class_free_dev(priv->mcan_dev->net);
return 0;
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 9913f5458279..2c22f40e12bd 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -881,7 +881,8 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "hecc ioremap failed\n");
- return PTR_ERR(priv->base);
+ err = PTR_ERR(priv->base);
+ goto probe_exit_candev;
}
/* handle hecc-ram memory */
@@ -889,20 +890,22 @@ static int ti_hecc_probe(struct platform_device *pdev)
"hecc-ram");
if (IS_ERR(priv->hecc_ram)) {
dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
- return PTR_ERR(priv->hecc_ram);
+ err = PTR_ERR(priv->hecc_ram);
+ goto probe_exit_candev;
}
/* handle mbx memory */
priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
if (IS_ERR(priv->mbx)) {
dev_err(&pdev->dev, "mbx ioremap failed\n");
- return PTR_ERR(priv->mbx);
+ err = PTR_ERR(priv->mbx);
+ goto probe_exit_candev;
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "No irq resource\n");
- goto probe_exit;
+ goto probe_exit_candev;
}
priv->ndev = ndev;
@@ -966,7 +969,7 @@ probe_exit_release_clk:
clk_put(priv->clk);
probe_exit_candev:
free_candev(ndev);
-probe_exit:
+
return err;
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 7ab87a758754..218fadc91155 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -367,7 +367,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.tseg2_max = 32,
.sjw_max = 16,
.brp_min = 1,
- .brp_max = 4096,
+ .brp_max = 8192,
.brp_inc = 1,
};
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 5857b37dcd96..e97f2e0da6b0 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -326,8 +326,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (!ctx)
return NETDEV_TX_BUSY;
- can_put_echo_skb(skb, priv->netdev, ctx->ndx);
-
if (cf->can_id & CAN_EFF_FLAG) {
/* SIDH | SIDL | EIDH | EIDL
* 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
@@ -357,6 +355,8 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
goto xmit_failed;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c2764799f9ef..204ccb27d6d9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -156,7 +156,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
/* case when event time (tsw) wraps */
if (ts < time_ref->ts_dev_1)
- delta_ts = 1 << time_ref->adapter->ts_used_bits;
+ delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits);
/* Otherwise, sync time counter (ts_dev_2) has wrapped:
* handle case when event time (tsn) hasn't.
@@ -168,7 +168,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
* tsn ts
*/
} else if (time_ref->ts_dev_1 < ts) {
- delta_ts = -(1 << time_ref->adapter->ts_used_bits);
+ delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits);
}
/* add delay between last sync and event timestamps */
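The move to BIT_ULL() above forces the shift to be done in 64-bit arithmetic. A standalone sketch (not from the patch; ts_used_bits = 31 is just a representative value, and the overflowing shift is undefined behaviour in ISO C, shown here only for what a typical two's-complement toolchain produces):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(nr)	(1ULL << (nr))	/* equivalent to the kernel definition */

int main(void)
{
	unsigned int ts_used_bits = 31;		/* representative value only */
	int64_t plain = 1 << ts_used_bits;	/* int-width shift, overflows */
	int64_t wide = BIT_ULL(ts_used_bits);	/* 64-bit shift */

	printf("1 << %u      = %lld\n", ts_used_bits, (long long)plain);
	printf("BIT_ULL(%u)  = %lld\n", ts_used_bits, (long long)wide);
	return 0;
}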
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 74db81dafee3..09701c17f3f6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -26,6 +26,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
@@ -1837,6 +1838,16 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
i++;
}
+ /* The standalone PHY11G requires 300ms to be fully
+ * initialized and ready for any MDIO communication after being
+ * taken out of reset. For the SoC-internal GPHY variant there
+ * is no (known) documentation for the minimum time after a
+ * reset. Use the same value as for the standalone variant as
+ * some users have reported internal PHYs not being detected
+ * without any delay.
+ */
+ msleep(300);
+
return 0;
remove_gphy:
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index bd297ae7cf9e..34cca0a4b31c 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2297,6 +2297,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
+
+ mv88e6xxx_g1_wait_eeprom_done(chip);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index f62aa83ca08d..33d443a37efc 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
+void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ u16 val;
+ int err;
+
+ /* Wait up to 1 second for the switch to finish reading the
+ * EEPROM.
+ */
+ while (time_before(jiffies, timeout)) {
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+ if (err) {
+ dev_err(chip->dev, "Error reading status");
+ return;
+ }
+
+ /* If the switch is still resetting, it may not
+ * respond on the bus, so an MDIO read returns
+ * 0xffff. Distinguish that case from the EEPROM-done
+ * condition, which is signalled by bit 0 being set.
+ */
+ if (val != 0xffff &&
+ val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
+ return;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(chip->dev, "Timeout waiting for EEPROM done");
+}
+
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1e3546f8b072..e05abe61fa11 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -278,6 +278,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 48390b7b18ad..1048509a849b 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -125,11 +125,9 @@ static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
* Offset 0x08: VTU/STU Data Register 2
* Offset 0x09: VTU/STU Data Register 3
*/
-
-static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
+ u16 *regs)
{
- u16 regs[3];
int i;
/* Read all 3 VTU/STU Data registers */
@@ -142,12 +140,45 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
return err;
}
- /* Extract MemberTag and PortState data */
+ return 0;
+}
+
+static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3];
+ int err;
+ int i;
+
+ err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+ if (err)
+ return err;
+
+ /* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
- unsigned int state_offset = member_offset + 2;
entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
+ }
+
+ return 0;
+}
+
+static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3];
+ int err;
+ int i;
+
+ err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+ if (err)
+ return err;
+
+ /* Extract PortState data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int state_offset = (i % 4) * 4 + 2;
+
entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
@@ -349,6 +380,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ err = mv88e6185_g1_stu_data_read(chip, entry);
+ if (err)
+ return err;
+
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] are located in VTU Operation 11:8
*/
@@ -374,16 +409,20 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- /* Fetch (and mask) VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry);
if (err)
return err;
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ /* Fetch VLAN PortState data from the STU */
+ err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6185_g1_stu_data_read(chip, entry);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0c12cf7bda50..3f65f2b370c5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2543,8 +2543,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
- (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 098b0328e3cb..ff9f96de74b8 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2312,8 +2312,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
- (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 74c1778d841e..b455b60a5434 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2383,7 +2383,8 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}
- if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
+ if (err) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7975f59735d6..7c21aaa8b9af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4099,7 +4099,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
- if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
+ if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
@@ -7757,6 +7758,7 @@ static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
u64 sw_tmp;
+ hw &= mask;
sw_tmp = (*sw & ~mask) | hw;
if (hw < (*sw & mask))
sw_tmp += mask + 1;
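[Editor's note: a standalone sketch (hypothetical helper, not the driver code) of the masked-counter accumulation that the added "hw &= mask;" protects: only the valid low bits of the hardware counter are spliced into the 64-bit software total, and a wrap is detected when those bits go backwards.]

#include <stdint.h>
#include <stdio.h>

static void add_one_ctr(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t sw_tmp;

	hw &= mask;			/* drop any stray high bits from hardware */
	sw_tmp = (*sw & ~mask) | hw;	/* splice new low bits into the total */
	if (hw < (*sw & mask))		/* low bits moved backwards -> wrapped */
		sw_tmp += mask + 1;
	*sw = sw_tmp;
}

int main(void)
{
	uint64_t sw = 0, mask = 0xffff;	/* pretend the device counter is 16 bits */

	add_one_ctr(0xfff0, &sw, mask);
	add_one_ctr(0x0010, &sw, mask);	/* wrapped once: total becomes 0x10010 */
	printf("total = 0x%llx\n", (unsigned long long)sw);
	return 0;
}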
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 53687bc7fcf5..1471c9a36238 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2079,6 +2079,9 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_input req = {0};
int rc;
+ if (BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -2997,7 +3000,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
start, length, data);
}
return rc;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 00024dd41147..80fb1f537bb3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1907,6 +1907,8 @@ err_register_netdev:
clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
err_ncsi_dev:
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
iounmap(priv->base);
@@ -1926,6 +1928,8 @@ static int ftgmac100_remove(struct platform_device *pdev)
netdev = platform_get_drvdata(pdev);
priv = netdev_priv(netdev);
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
unregister_netdev(netdev);
clk_disable_unprepare(priv->rclk);
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 0fa18b00c49b..d99ea0f4e4a6 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -16,6 +16,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI
+ select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 52be6e315752..fc2075ea57fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -33,7 +33,10 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
+ enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
+ enetc_unlock_mdio();
+
if (unlikely(!count))
goto drop_packet_err;
@@ -239,7 +242,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb_tx_timestamp(skb);
/* let H/W know BD ring has been updated */
- enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+ enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
return count;
@@ -262,12 +265,16 @@ static irqreturn_t enetc_msix(int irq, void *data)
struct enetc_int_vector *v = data;
int i;
+ enetc_lock_mdio();
+
/* disable interrupts */
- enetc_wr_reg(v->rbier, 0);
- enetc_wr_reg(v->ricr1, v->rx_ictt);
+ enetc_wr_reg_hot(v->rbier, 0);
+ enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+ enetc_unlock_mdio();
napi_schedule(&v->napi);
@@ -334,19 +341,23 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
+
/* enable interrupts */
- enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
- ENETC_TBIER_TXTIE);
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ enetc_unlock_mdio();
return work_done;
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
- int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+ int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}
@@ -386,7 +397,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
i = tx_ring->next_to_clean;
tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_lock_mdio();
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ enetc_unlock_mdio();
do_tstamp = false;
@@ -429,16 +443,20 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
tx_swbd = tx_ring->tx_swbd;
}
+ enetc_lock_mdio();
+
/* BD iteration loop end */
if (is_eof) {
tx_frm_cnt++;
/* re-arm interrupt source */
- enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
- BIT(16 + tx_ring->index));
+ enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
+ BIT(16 + tx_ring->index));
}
if (unlikely(!bds_to_clean))
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+ enetc_unlock_mdio();
}
tx_ring->next_to_clean = i;
@@ -515,8 +533,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track from page reuse */
rx_ring->next_to_use = i;
- /* update ENETC's consumer index */
- enetc_wr_reg(rx_ring->rcir, i);
}
return j;
@@ -534,8 +550,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
u64 tstamp;
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
- lo = enetc_rd(hw, ENETC_SICTR0);
- hi = enetc_rd(hw, ENETC_SICTR1);
+ lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
+ hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
rxbd = enetc_rxbd_ext(rxbd);
tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
if (lo <= tstamp_lo)
@@ -684,23 +700,31 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
u32 bd_status;
u16 size;
+ enetc_lock_mdio();
+
if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+ /* update ENETC's consumer index */
+ enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
cleaned_cnt -= count;
}
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
- if (!bd_status)
+ if (!bd_status) {
+ enetc_unlock_mdio();
break;
+ }
- enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
dma_rmb(); /* for reading other rxbd fields */
size = le16_to_cpu(rxbd->r.buf_len);
skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
- if (!skb)
+ if (!skb) {
+ enetc_unlock_mdio();
break;
+ }
enetc_get_offloads(rx_ring, rxbd, skb);
@@ -712,6 +736,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+ enetc_unlock_mdio();
dev_kfree_skb(skb);
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
dma_rmb();
@@ -751,6 +776,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_process_skb(rx_ring, skb);
+ enetc_unlock_mdio();
+
napi_gro_receive(napi, skb);
rx_frm_cnt++;
@@ -1225,6 +1252,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+ enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use);
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 17cf7c94fdb5..eb6bbf1113c7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -324,14 +324,100 @@ struct enetc_hw {
void __iomem *global;
};
-/* general register accessors */
-#define enetc_rd_reg(reg) ioread32((reg))
-#define enetc_wr_reg(reg, val) iowrite32((val), (reg))
+/* ENETC register accessors */
+
+/* MDIO issue workaround (on LS1028A) -
+ * Due to a hardware issue, an access to MDIO registers
+ * that is concurrent with other ENETC register accesses
+ * may lead to the MDIO access being dropped or corrupted.
+ * To protect the MDIO accesses a readers-writers locking
+ * scheme is used, where the MDIO register accesses are
+ * protected by write locks to ensure exclusivity, while
+ * the remaining ENETC registers are accessed under read
+ * locks since they only compete with MDIO accesses.
+ */
+extern rwlock_t enetc_mdio_lock;
+
+/* use this locking primitive only on the fast datapath to
+ * group together multiple non-MDIO register accesses to
+ * minimize the overhead of the lock
+ */
+static inline void enetc_lock_mdio(void)
+{
+ read_lock(&enetc_mdio_lock);
+}
+
+static inline void enetc_unlock_mdio(void)
+{
+ read_unlock(&enetc_mdio_lock);
+}
+
+/* use these accessors only on the fast datapath under
+ * the enetc_lock_mdio() locking primitive to minimize
+ * the overhead of the lock
+ */
+static inline u32 enetc_rd_reg_hot(void __iomem *reg)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ return ioread32(reg);
+}
+
+static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ iowrite32(val, reg);
+}
+
+/* internal helpers for the MDIO w/a */
+static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
+{
+ u32 val;
+
+ enetc_lock_mdio();
+ val = ioread32(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
+{
+ enetc_lock_mdio();
+ iowrite32(val, reg);
+ enetc_unlock_mdio();
+}
+
+static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+
+ return val;
+}
+
+static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+}
+
#ifdef ioread64
-#define enetc_rd_reg64(reg) ioread64((reg))
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+ return ioread64(reg);
+}
#else
/* using this to read out stats on 32b systems */
-static inline u64 enetc_rd_reg64(void __iomem *reg)
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
{
u32 low, high, tmp;
@@ -345,12 +431,29 @@ static inline u64 enetc_rd_reg64(void __iomem *reg)
}
#endif
+static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+{
+ u64 val;
+
+ enetc_lock_mdio();
+ val = _enetc_rd_reg64(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+/* general register accessors */
+#define enetc_rd_reg(reg) _enetc_rd_reg_wa((reg))
+#define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val))
#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
-#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off))
+#define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off))
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
+#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
+ (hw)->port + (off), val)
/* global register accessors - PF only */
#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
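[Editor's note: a userspace analogy of the locking scheme described in the workaround comment above, with a pthread rwlock standing in for the kernel rwlock and illustrative names: ordinary register accesses share the read lock and may run concurrently with each other, while MDIO accesses take the write lock and therefore exclude everything else.]

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t mdio_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint32_t regs[16];			/* stands in for the MMIO registers */

static uint32_t reg_read(unsigned int off)
{
	uint32_t val;

	pthread_rwlock_rdlock(&mdio_lock);	/* like enetc_lock_mdio() */
	val = regs[off];
	pthread_rwlock_unlock(&mdio_lock);
	return val;
}

static void mdio_write(unsigned int off, uint32_t val)
{
	pthread_rwlock_wrlock(&mdio_lock);	/* exclusive: no other access runs */
	regs[off] = val;
	pthread_rwlock_unlock(&mdio_lock);
}

int main(void)
{
	mdio_write(3, 0x1234);
	printf("reg[3] = 0x%x\n", reg_read(3));
	return 0;
}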
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 48c32a171afa..ee0116ed4738 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -16,13 +16,13 @@
static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
{
- return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
}
static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
u32 val)
{
- enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
}
#define enetc_mdio_rd(mdio_priv, off) \
@@ -174,3 +174,7 @@ struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
return hw;
}
EXPORT_SYMBOL_GPL(enetc_hw_alloc);
+
+/* Lock for MDIO access errata on LS1028A */
+DEFINE_RWLOCK(enetc_mdio_lock);
+EXPORT_SYMBOL_GPL(enetc_mdio_lock);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d7919555250d..04f24c66cf36 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1808,7 +1808,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int ret = 0, frame_start, frame_addr, frame_op;
bool is_c45 = !!(regnum & MII_ADDR_C45);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -1867,11 +1867,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
int ret, frame_start, frame_addr;
bool is_c45 = !!(regnum & MII_ADDR_C45);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- else
- ret = 0;
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -2275,7 +2273,7 @@ static void fec_enet_get_regs(struct net_device *ndev,
u32 i, off;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2976,7 +2974,7 @@ fec_enet_open(struct net_device *ndev)
int ret;
bool reset_again;
- ret = pm_runtime_get_sync(&fep->pdev->dev);
+ ret = pm_runtime_resume_and_get(&fep->pdev->dev);
if (ret < 0)
return ret;
@@ -3770,7 +3768,7 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 54b0bf574c05..4a9041ee1b39 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2287,6 +2287,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
@@ -2295,8 +2296,8 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
sinfo->nr_frags++;
-
- rx_desc->buf_phys_addr = 0;
+ } else {
+ page_pool_put_full_page(rxq->page_pool, page, true);
}
*size -= len;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 1b97adae542e..be5677623455 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -676,7 +676,8 @@ static int prestera_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30))) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+ if (err) {
dev_err(&pdev->dev, "fail to set DMA mask\n");
goto err_dma_mask;
}
@@ -702,8 +703,10 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");
fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
- if (!fw->wq)
+ if (!fw->wq) {
+ err = -ENOMEM;
goto err_wq_alloc;
+ }
INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 13250553263b..a8641a407c06 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -966,6 +966,7 @@ static int mtk_star_enable(struct net_device *ndev)
mtk_star_adjust_link, 0, priv->phy_intf);
if (!priv->phydev) {
netdev_err(ndev, "failed to connect to PHY\n");
+ ret = -ENODEV;
goto err_free_irq;
}
@@ -1053,7 +1054,7 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
err_drop_packet:
dev_kfree_skb(skb);
ndev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
/* Returns the number of bytes sent or a negative number on the first
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6ff9620a137..f6cfec81ccc3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1864,8 +1864,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
-#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
-#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
+#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
@@ -1873,7 +1873,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
-#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13)
#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 650ae08c71de..8f020f26ebf5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -182,8 +182,8 @@ struct mlx4_init_hca_param {
u64 cmpt_base;
u64 mtt_base;
u64 global_caps;
- u16 log_mc_entry_sz;
- u16 log_mc_hash_sz;
+ u8 log_mc_entry_sz;
+ u8 log_mc_hash_sz;
u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
u8 log_num_qps;
u8 log_num_srqs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 3e44e4d820c5..95f2b26a3ee3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -187,7 +187,7 @@ static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
struct mlx5e_priv *priv;
/* A given netdev is not a representor or not a slave of LAG configuration */
- if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
+ if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
return false;
priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0e45590662a8..381a9c8c9da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -64,13 +64,13 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
if (!spec)
return -ENOMEM;
- /* Action to copy 7 bit ipsec_syndrome to regB[0:6] */
+ /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
MLX5_SET(copy_action_in, action, src_offset, 0);
MLX5_SET(copy_action_in, action, length, 7);
MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(copy_action_in, action, dst_offset, 0);
+ MLX5_SET(copy_action_in, action, dst_offset, 24);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
@@ -488,13 +488,13 @@ static int rx_add_rule(struct mlx5e_priv *priv,
setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
- /* Set 1 bit ipsec marker */
- /* Set 24 bit ipsec_obj_id */
+ /* Set bit[31] ipsec marker */
+ /* Set bit[23-0] ipsec_obj_id */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
- MLX5_SET(set_action_in, action, offset, 7);
- MLX5_SET(set_action_in, action, length, 25);
+ MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 11e31a3db2be..a9b45606dbdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -453,7 +453,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
- u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
struct mlx5e_priv *priv;
struct xfrm_offload *xo;
struct xfrm_state *xs;
@@ -481,7 +480,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
- switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
+ switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (WARN_ON_ONCE(priv->ipsec->no_trailer))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 056dacb612b0..9df9b9a8e09b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,9 +39,10 @@
#include "en.h"
#include "en/txrx.h"
-#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
-#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
-#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
@@ -78,7 +79,7 @@ static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_st
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
- return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
+ return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
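[Editor's note: a small standalone sketch of the new 32-bit metadata layout used by the macros above (bit 31 marker, bits 30:24 syndrome, bits 23:0 object id); the pack helper is hypothetical and only illustrates how the fields fit together.]

#include <stdint.h>
#include <stdio.h>

#define META_MARKER(m)		(((m) >> 31) & 0x1)
#define META_SYNDROME(m)	(((m) >> 24) & 0x7f)
#define META_HANDLE(m)		((m) & 0xffffff)

static uint32_t meta_pack(uint32_t obj_id, uint32_t syndrome)
{
	return (1u << 31) | ((syndrome & 0x7f) << 24) | (obj_id & 0xffffff);
}

int main(void)
{
	uint32_t m = meta_pack(0xabcdef, 0);

	printf("marker=%u syndrome=%u handle=0x%x\n",
	       (unsigned int)META_MARKER(m), (unsigned int)META_SYNDROME(m),
	       (unsigned int)META_HANDLE(m));
	return 0;
}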
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 7f6221b8b1f7..6a1d82503ef8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -476,19 +476,22 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct tcphdr);
- if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT))
+ if (unlikely(!sk))
return;
- if (unlikely(!resync_queue_get_psv(sk)))
- return;
+ if (unlikely(sk->sk_state == TCP_TIME_WAIT))
+ goto unref;
- skb->sk = sk;
- skb->destructor = sock_edemux;
+ if (unlikely(!resync_queue_get_psv(sk)))
+ goto unref;
seq = th->seq;
datalen = skb->len - depth;
tls_offload_rx_resync_async_request_start(sk, seq, datalen);
rq->stats->tls_resync_req_start++;
+
+unref:
+ sock_gen_put(sk);
}
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2e2fa0440032..ce710f22b1ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -5229,8 +5229,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (IS_ERR(tc->ct))
+ if (IS_ERR(tc->ct)) {
+ err = PTR_ERR(tc->ct);
goto err_ct;
+ }
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 3b979008143d..4a2ce241522e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -283,6 +283,9 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
reg_b = be32_to_cpu(cqe->ft_metadata);
+ if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
+ return false;
+
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
if (chain)
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 82b4419af9d4..6dd3ea3cbbed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -144,7 +144,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+/* If the packet is not CHECKSUM_PARTIAL (e.g. an ICMP packet), the L3
+ * checksum flag still needs to be set for IPsec
+ */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
@@ -154,7 +156,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
sq->stats->csum_partial_inner++;
} else {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
}
@@ -162,11 +163,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
- if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
- return;
- }
-
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -177,6 +173,9 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
+ } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+
} else
sq->stats->csum_none++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e8e6294c7cca..d4ee0a9c03db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1142,6 +1142,10 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (!vport->qos.enabled)
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
return mlx5_modify_scheduling_element_cmd(esw->dev,
@@ -1408,6 +1412,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
int i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ memset(&vport->qos, 0, sizeof(vport->qos));
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -2221,12 +2226,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
max_guarantee = evport->info.min_rate;
}
- return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ if (max_guarantee)
+ return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ return 0;
}
-static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ u32 divider = calculate_vports_min_rate_divider(esw);
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
@@ -2239,9 +2247,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
continue;
vport_min_rate = evport->info.min_rate;
vport_max_rate = evport->info.max_rate;
- bw_share = MLX5_MIN_BW_SHARE;
+ bw_share = 0;
- if (vport_min_rate)
+ if (divider)
bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
divider,
fw_max_bw_share);
@@ -2266,7 +2274,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u32 fw_max_bw_share;
u32 previous_min_rate;
- u32 divider;
bool min_rate_supported;
bool max_rate_supported;
int err = 0;
@@ -2291,8 +2298,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
previous_min_rate = evport->info.min_rate;
evport->info.min_rate = min_rate;
- divider = calculate_vports_min_rate_divider(esw);
- err = normalize_vports_min_rate(esw, divider);
+ err = normalize_vports_min_rate(esw);
if (err) {
evport->info.min_rate = previous_min_rate;
goto unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 325a5b0d6829..9fdd99272e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -534,6 +534,13 @@ static void del_sw_hw_rule(struct fs_node *node)
goto out;
}
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
+ --fte->dests_size) {
+ fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ goto out;
+ }
+
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
fte->modify_mask |=
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 872e9910bb7c..a619d90559f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -6,6 +6,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
select NET_DEVLINK
+ select MLXFW
help
This driver supports Mellanox Technologies Switch ASICs family.
@@ -82,7 +83,6 @@ config MLXSW_SPECTRUM
select GENERIC_ALLOCATOR
select PARMAN
select OBJAGG
- select MLXFW
imply PTP_1588_CLOCK
select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 937b8e46f8c7..1a86535c4968 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -571,7 +571,8 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
if (trans->core->fw_flash_in_progress)
timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
- queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
+ timeout << trans->retries);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e2c99d909247..b319c22c211c 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -148,7 +148,8 @@ static void lan743x_intr_software_isr(void *context)
int_sts = lan743x_csr_read(adapter, INT_STS);
if (int_sts & INT_BIT_SW_GP_) {
- lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
+ /* disable the interrupt to prevent repeated re-triggering */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
intr->software_isr_flag = 1;
}
}
@@ -1307,13 +1308,13 @@ clean_up_data_descriptor:
goto clear_active;
if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
goto clear_skb;
}
if (cleanup) {
lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
} else {
ignore_sync = (buffer_info->flags &
TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
@@ -1623,7 +1624,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (required_number_of_descriptors >
lan743x_tx_get_avail_desc(tx)) {
if (required_number_of_descriptors > (tx->ring_size - 1)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
} else {
/* save to overflow buffer */
tx->overflow_skb = skb;
@@ -1656,7 +1657,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
start_frame_length,
do_timestamp,
skb->ip_summed == CHECKSUM_PARTIAL)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
@@ -1675,7 +1676,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
* frame assembler clean up was performed inside
* lan743x_tx_frame_add_fragment
*/
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0e4cd8890cff..0a22f8ce9a2c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1647,9 +1647,9 @@ static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
ilog2(rounded_conn_num));
STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->first_free);
+ p_hwfn->p_cxt_mngr->src_t2.first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->last_free);
+ p_hwfn->p_cxt_mngr->src_t2.last_free);
}
/* Timers PF */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8b64495f8745..056e79620a0e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -326,9 +326,6 @@ struct qed_cxt_mngr {
/* SRC T2 */
struct qed_src_t2 src_t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
/* total number of SRQ's for this hwfn */
u32 srq_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 512cbef24097..a99861124630 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2754,14 +2754,18 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
sizeof(*iwarp_info->partial_fpdus),
GFP_KERNEL);
- if (!iwarp_info->partial_fpdus)
+ if (!iwarp_info->partial_fpdus) {
+ rc = -ENOMEM;
goto err;
+ }
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
- if (!iwarp_info->mpa_intermediate_buf)
+ if (!iwarp_info->mpa_intermediate_buf) {
+ rc = -ENOMEM;
goto err;
+ }
/* The mpa_bufs array serves for pending RX packets received on the
* mpa ll2 that don't have place on the tx ring and require later
@@ -2771,8 +2775,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
sizeof(*iwarp_info->mpa_bufs),
GFP_KERNEL);
- if (!iwarp_info->mpa_bufs)
+ if (!iwarp_info->mpa_bufs) {
+ rc = -ENOMEM;
goto err;
+ }
INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b8af59fc1aa4..d2c190732d3e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2231,7 +2231,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
/* Boot either flash image or firmware image from host file system */
if (qlcnic_load_fw_file == 1) {
- if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ err = qlcnic_83xx_load_fw_image_from_host(adapter);
+ if (err)
return err;
} else {
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 29a7bfa2584d..3d7d3ab383f8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -188,6 +188,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
dev = skb->dev;
port = rmnet_get_port_rcu(dev);
+ if (unlikely(!port)) {
+ atomic_long_inc(&skb->dev->rx_nohandler);
+ kfree_skb(skb);
+ goto done;
+ }
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index f61cb997a8f6..82b1c7a5a7a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -113,8 +113,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk))
+ if (IS_ERR(dwmac->tx_clk)) {
+ ret = PTR_ERR(dwmac->tx_clk);
goto err_remove_config_dt;
+ }
clk_prepare_enable(dwmac->tx_clk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index cb87d31a99df..57a53a600aa5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -23,7 +23,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 10000, 100000);
+ 10000, 200000);
}
/* CSR1 enables the transmit DMA to check for new descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d833908b660a..ba45fe237512 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5247,6 +5247,7 @@ int stmmac_resume(struct device *dev)
return ret;
}
+ rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -5262,6 +5263,7 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv);
mutex_unlock(&priv->lock);
+ rtnl_unlock();
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
rtnl_lock();
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 75056c14b161..5dc60ecabe56 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -1001,8 +1001,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
dev_err(dev, "Failed to register ptp clk %ld\n",
PTR_ERR(cpts->ptp_clock));
- if (!cpts->ptp_clock)
- ret = -ENODEV;
+ ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
goto refclk_disable;
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9fd1f77190ad..b0f00b4edd94 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -838,9 +838,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->cpts))
- dev_err(priv->dev, "error registering cpts device\n");
-
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
}
cpsw_restore(priv);
@@ -1631,6 +1634,7 @@ static int cpsw_probe(struct platform_device *pdev)
CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
if (!ndev) {
dev_err(dev, "error allocating net_device\n");
+ ret = -ENOMEM;
goto clean_cpts;
}
@@ -1716,7 +1720,6 @@ static int cpsw_probe(struct platform_device *pdev)
/* Enable misc CPTS evnt_pend IRQ */
cpts_set_irqpoll(cpsw->cpts, false);
- writel(0x10, &cpsw->wr_regs->misc_en);
skip_cpts:
cpsw_notice(priv, probe,
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index f779d2e1b5c5..2f5e0ad23ad7 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -873,8 +873,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->cpts))
- dev_err(priv->dev, "error registering cpts device\n");
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
napi_enable(&cpsw->napi_rx);
napi_enable(&cpsw->napi_tx);
@@ -2006,7 +2010,6 @@ static int cpsw_probe(struct platform_device *pdev)
/* Enable misc CPTS evnt_pend IRQ */
cpts_set_irqpoll(cpsw->cpts, false);
- writel(0x10, &cpsw->wr_regs->misc_en);
skip_cpts:
ret = cpsw_register_notifiers(cpsw);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index d07008a818df..1426bfc009bc 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -224,8 +224,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
if (ip_tunnel_collect_metadata() || gs->collect_md) {
__be16 flags;
- flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
- (gnvh->oam ? TUNNEL_OAM : 0) |
+ flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 92642030e735..e8599bb948c0 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -362,22 +362,31 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
return trans;
}
-/* Free a previously-allocated transaction (used only in case of error) */
+/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
+ refcount_t *refcount = &trans->refcount;
struct gsi_trans_info *trans_info;
+ bool last;
- if (!refcount_dec_and_test(&trans->refcount))
+ /* We must hold the lock to release the last reference */
+ if (refcount_dec_not_one(refcount))
return;
trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
spin_lock_bh(&trans_info->spinlock);
- list_del(&trans->links);
+ /* Reference might have been added before we got the lock */
+ last = refcount_dec_and_test(refcount);
+ if (last)
+ list_del(&trans->links);
spin_unlock_bh(&trans_info->spinlock);
+ if (!last)
+ return;
+
ipa_gsi_trans_release(trans);
/* Releasing the reserved TREs implicitly frees the sgl[] and
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index d07061417675..e7972e88ffe0 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -96,6 +96,7 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
.open = simple_open,
.write = nsim_dev_take_snapshot_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,
@@ -188,6 +189,7 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
.read = nsim_dev_trap_fa_cookie_read,
.write = nsim_dev_trap_fa_cookie_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 62958b238d50..21e2974660e7 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -261,6 +261,7 @@ static const struct file_operations nsim_dev_health_break_fops = {
.open = simple_open,
.write = nsim_dev_health_break_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 6ab023acefd6..02dc3123eb6c 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -124,6 +124,7 @@ static const struct file_operations nsim_udp_tunnels_info_reset_fops = {
.open = simple_open,
.write = nsim_udp_tunnels_info_reset_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index 6cf9b798b710..10be266e48e8 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -981,7 +981,6 @@ int vsc8584_macsec_init(struct phy_device *phydev)
switch (phydev->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_VSC856X:
- case PHY_ID_VSC8575:
case PHY_ID_VSC8582:
case PHY_ID_VSC8584:
INIT_LIST_HEAD(&vsc8531->macsec_flows);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ec97669be5c2..0fc39ac5ca88 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -291,8 +291,10 @@ static int smsc_phy_probe(struct phy_device *phydev)
return ret;
ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(priv->refclk);
return ret;
+ }
return 0;
}
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index ca89d8258dd3..c4568a491dc4 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -197,7 +197,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* enable ethernet mode (?) */
- if (cx82310_enable_ethernet(dev))
+ ret = cx82310_enable_ethernet(dev);
+ if (ret)
goto err;
/* get the MAC address */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 581ed51abb53..fc378ff56775 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1070,7 +1070,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
{QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
{QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
- {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
{QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
{QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
{QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b01afcb7777..9a270e49df17 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2929,7 +2929,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
struct nvme_effects_log **log)
{
- struct nvme_cel *cel = xa_load(&ctrl->cels, csi);
+ struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
int ret;
if (cel)
@@ -2940,16 +2940,15 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
return -ENOMEM;
ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
- &cel->log, sizeof(cel->log), 0);
+ cel, sizeof(*cel), 0);
if (ret) {
kfree(cel);
return ret;
}
- cel->csi = csi;
- xa_store(&ctrl->cels, cel->csi, cel, GFP_KERNEL);
+ xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
out:
- *log = &cel->log;
+ *log = cel;
return 0;
}
@@ -4374,6 +4373,19 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
+static void nvme_free_cels(struct nvme_ctrl *ctrl)
+{
+ struct nvme_effects_log *cel;
+ unsigned long i;
+
+ xa_for_each (&ctrl->cels, i, cel) {
+ xa_erase(&ctrl->cels, i);
+ kfree(cel);
+ }
+
+ xa_destroy(&ctrl->cels);
+}
+
static void nvme_free_ctrl(struct device *dev)
{
struct nvme_ctrl *ctrl =
@@ -4383,8 +4395,7 @@ static void nvme_free_ctrl(struct device *dev)
if (!subsys || ctrl->instance != subsys->instance)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
- xa_destroy(&ctrl->cels);
-
+ nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
__free_page(ctrl->discard_page);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bc330bf0d3bd..567f7ad18a91 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -226,12 +226,6 @@ struct nvme_fault_inject {
#endif
};
-struct nvme_cel {
- struct list_head entry;
- struct nvme_effects_log log;
- u8 csi;
-};
-
struct nvme_ctrl {
bool comp_seen;
enum nvme_ctrl_state state;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0578ff253c47..3be352403839 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -292,9 +292,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}
+static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
+{
+ if (!nvmeq->qid)
+ return;
+
+ nvmeq->dbbuf_sq_db = NULL;
+ nvmeq->dbbuf_cq_db = NULL;
+ nvmeq->dbbuf_sq_ei = NULL;
+ nvmeq->dbbuf_cq_ei = NULL;
+}
+
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
struct nvme_command c;
+ unsigned int i;
if (!dev->dbbuf_dbs)
return;
@@ -308,6 +320,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
+
+ for (i = 1; i <= dev->online_queues; i++)
+ nvme_dbbuf_free(&dev->queues[i]);
}
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a5ad553da8cd..42bbd99a36ac 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1315,7 +1315,6 @@ static int _regulator_do_enable(struct regulator_dev *rdev);
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
- * @constraints: constraints to apply
*
* Allows platform initialisation code to define and constrain
* regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
@@ -1323,21 +1322,11 @@ static int _regulator_do_enable(struct regulator_dev *rdev);
* regulator operations to proceed i.e. set_voltage, set_current_limit,
* set_mode.
*/
-static int set_machine_constraints(struct regulator_dev *rdev,
- const struct regulation_constraints *constraints)
+static int set_machine_constraints(struct regulator_dev *rdev)
{
int ret = 0;
const struct regulator_ops *ops = rdev->desc->ops;
- if (constraints)
- rdev->constraints = kmemdup(constraints, sizeof(*constraints),
- GFP_KERNEL);
- else
- rdev->constraints = kzalloc(sizeof(*constraints),
- GFP_KERNEL);
- if (!rdev->constraints)
- return -ENOMEM;
-
ret = machine_constraints_voltage(rdev, rdev->constraints);
if (ret != 0)
return ret;
@@ -1852,6 +1841,15 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
}
+ if (r == rdev) {
+ dev_err(dev, "Supply for %s (%s) resolved to itself\n",
+ rdev->desc->name, rdev->supply_name);
+ if (!have_full_constraints())
+ return -EINVAL;
+ r = dummy_regulator_rdev;
+ get_device(&r->dev);
+ }
+
/*
* If the supply's parent device is not the same as the
* regulator's parent device, then ensure the parent device
@@ -5146,7 +5144,6 @@ struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
const struct regulator_config *cfg)
{
- const struct regulation_constraints *constraints = NULL;
const struct regulator_init_data *init_data;
struct regulator_config *config = NULL;
static atomic_t regulator_no = ATOMIC_INIT(-1);
@@ -5285,14 +5282,23 @@ regulator_register(const struct regulator_desc *regulator_desc,
/* set regulator constraints */
if (init_data)
- constraints = &init_data->constraints;
+ rdev->constraints = kmemdup(&init_data->constraints,
+ sizeof(*rdev->constraints),
+ GFP_KERNEL);
+ else
+ rdev->constraints = kzalloc(sizeof(*rdev->constraints),
+ GFP_KERNEL);
+ if (!rdev->constraints) {
+ ret = -ENOMEM;
+ goto wash;
+ }
if (init_data && init_data->supply_regulator)
rdev->supply_name = init_data->supply_regulator;
else if (regulator_desc->supply_name)
rdev->supply_name = regulator_desc->supply_name;
- ret = set_machine_constraints(rdev, constraints);
+ ret = set_machine_constraints(rdev);
if (ret == -EPROBE_DEFER) {
/* Regulator might be in bypass mode and so needs its supply
* to set the constraints */
@@ -5301,7 +5307,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
* that is just being created */
ret = regulator_resolve_supply(rdev);
if (!ret)
- ret = set_machine_constraints(rdev, constraints);
+ ret = set_machine_constraints(rdev);
else
rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
ERR_PTR(ret));
@@ -5843,13 +5849,14 @@ static int regulator_late_cleanup(struct device *dev, void *data)
if (rdev->use_count)
goto unlock;
- /* If we can't read the status assume it's on. */
+ /* If we can't read the status assume it's always on. */
if (ops->is_enabled)
enabled = ops->is_enabled(rdev);
else
enabled = 1;
- if (!enabled)
+ /* But if reading the status failed, assume that it's off. */
+ if (enabled <= 0)
goto unlock;
if (have_full_constraints()) {
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 7e8ba9246167..01a12cfcea7c 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -836,11 +836,14 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
* the switched regulator till yet.
*/
if (pfuze_chip->flags & PFUZE_FLAG_DISABLE_SW) {
- if (pfuze_chip->regulator_descs[i].sw_reg) {
- desc->ops = &pfuze100_sw_disable_regulator_ops;
- desc->enable_val = 0x8;
- desc->disable_val = 0x0;
- desc->enable_time = 500;
+ if (pfuze_chip->chip_id == PFUZE100 ||
+ pfuze_chip->chip_id == PFUZE200) {
+ if (pfuze_chip->regulator_descs[i].sw_reg) {
+ desc->ops = &pfuze100_sw_disable_regulator_ops;
+ desc->enable_val = 0x8;
+ desc->disable_val = 0x0;
+ desc->enable_time = 500;
+ }
}
}
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 3e60bff76194..9f0a4d50cead 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -342,8 +342,17 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
return ret;
}
- /* If data is exactly the same, then just update index, no change */
info = &abb->info[sel];
+ /*
+	 * When the Linux kernel is starting up, we aren't sure of the
+	 * bias configuration that the bootloader has configured.
+ * So, we get to know the actual setting the first time
+ * we are asked to transition.
+ */
+ if (abb->current_info_idx == -EINVAL)
+ goto just_set_abb;
+
+ /* If data is exactly the same, then just update index, no change */
oinfo = &abb->info[abb->current_info_idx];
if (!memcmp(info, oinfo, sizeof(*info))) {
dev_dbg(dev, "%s: Same data new idx=%d, old idx=%d\n", __func__,
@@ -351,6 +360,7 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
goto out;
}
+just_set_abb:
ret = ti_abb_set_opp(rdev, abb, info);
out:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index eb17fea8075c..217a7b84abdf 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2980,6 +2980,12 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
if (!block)
return -EINVAL;
+ /*
+ * If the request is an ERP request there is nothing to requeue.
+ * This will be done with the remaining original request.
+ */
+ if (cqr->refers)
+ return 0;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
blk_mq_requeue_request(req, false);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 1e9c3171fa9f..f9314f1393fb 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -533,8 +533,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
if (conn->task == task)
conn->task = NULL;
- if (conn->ping_task == task)
- conn->ping_task = NULL;
+ if (READ_ONCE(conn->ping_task) == task)
+ WRITE_ONCE(conn->ping_task, NULL);
/* release get from queueing */
__iscsi_put_task(task);
@@ -738,6 +738,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
+ if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
+ WRITE_ONCE(conn->ping_task, task);
+
if (!ihost->workq) {
if (iscsi_prep_mgmt_task(conn, task))
goto free_task;
@@ -941,8 +944,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
struct iscsi_nopout hdr;
struct iscsi_task *task;
- if (!rhdr && conn->ping_task)
- return -EINVAL;
+ if (!rhdr) {
+ if (READ_ONCE(conn->ping_task))
+ return -EINVAL;
+ WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
+ }
memset(&hdr, 0, sizeof(struct iscsi_nopout));
hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -957,11 +963,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
if (!task) {
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, NULL);
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
/* only track our nops */
- conn->ping_task = task;
conn->last_ping = jiffies;
}
@@ -984,7 +991,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task,
struct iscsi_conn *conn = task->conn;
int rc = 0;
- if (conn->ping_task != task) {
+ if (READ_ONCE(conn->ping_task) != task) {
/*
* If this is not in response to one of our
* nops then it must be from userspace.
@@ -1923,7 +1930,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
*/
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
{
- if (conn->ping_task &&
+ if (READ_ONCE(conn->ping_task) &&
time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
return 1;
@@ -2058,7 +2065,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
* Checking the transport already or nop from a cmd timeout still
* running
*/
- if (conn->ping_task) {
+ if (READ_ONCE(conn->ping_task)) {
task->have_checked_conn = true;
rc = BLK_EH_RESET_TIMER;
goto done;
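
The libiscsi hunks above stop relying on locking for conn->ping_task and instead publish and observe it with WRITE_ONCE()/READ_ONCE(), reserving the slot with an INVALID_SCSI_TASK sentinel before the nop-out is actually allocated. A hedged, generic sketch of that reserve-then-publish pattern (the names here are illustrative, not the libiscsi API; like libiscsi, it assumes a single serialized submitter):

  #include <linux/compiler.h>
  #include <linux/errno.h>

  struct ping_req;				/* opaque request type */
  extern struct ping_req *alloc_ping(void);	/* hypothetical allocator */

  /* Sentinel: the slot is claimed but the request isn't allocated yet. */
  #define PING_RESERVED	((struct ping_req *)-1UL)

  static struct ping_req *ping_slot;	/* shared, accessed via READ/WRITE_ONCE */

  static int send_ping(void)
  {
  	struct ping_req *req;

  	if (READ_ONCE(ping_slot))		/* one outstanding ping at a time */
  		return -EINVAL;
  	WRITE_ONCE(ping_slot, PING_RESERVED);	/* claim before allocating */

  	req = alloc_ping();
  	if (!req) {
  		WRITE_ONCE(ping_slot, NULL);	/* roll back the reservation */
  		return -EIO;
  	}
  	WRITE_ONCE(ping_slot, req);		/* publish the real request */
  	return 0;
  }

  static void complete_ping(struct ping_req *req)
  {
  	if (READ_ONCE(ping_slot) == req)	/* clear only our own ping */
  		WRITE_ONCE(ping_slot, NULL);
  }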
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 14c9d0133bce..c028446c7460 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1327,7 +1327,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
data = of_id->data;
- master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
+ master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
if (!master) {
dev_err(dev, "error allocating spi_master\n");
return -ENOMEM;
@@ -1367,21 +1367,17 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (res) {
qspi->base[MSPI] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[MSPI])) {
- ret = PTR_ERR(qspi->base[MSPI]);
- goto qspi_resource_err;
- }
+ if (IS_ERR(qspi->base[MSPI]))
+ return PTR_ERR(qspi->base[MSPI]);
} else {
- goto qspi_resource_err;
+ return 0;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
if (res) {
qspi->base[BSPI] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[BSPI])) {
- ret = PTR_ERR(qspi->base[BSPI]);
- goto qspi_resource_err;
- }
+ if (IS_ERR(qspi->base[BSPI]))
+ return PTR_ERR(qspi->base[BSPI]);
qspi->bspi_mode = true;
} else {
qspi->bspi_mode = false;
@@ -1392,18 +1388,14 @@ int bcm_qspi_probe(struct platform_device *pdev,
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
if (res) {
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[CHIP_SELECT])) {
- ret = PTR_ERR(qspi->base[CHIP_SELECT]);
- goto qspi_resource_err;
- }
+ if (IS_ERR(qspi->base[CHIP_SELECT]))
+ return PTR_ERR(qspi->base[CHIP_SELECT]);
}
qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
GFP_KERNEL);
- if (!qspi->dev_ids) {
- ret = -ENOMEM;
- goto qspi_resource_err;
- }
+ if (!qspi->dev_ids)
+ return -ENOMEM;
for (val = 0; val < num_irqs; val++) {
irq = -1;
@@ -1484,7 +1476,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->xfer_mode.addrlen = -1;
qspi->xfer_mode.hp = -1;
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = spi_register_master(master);
if (ret < 0) {
dev_err(dev, "can't register master\n");
goto qspi_reg_err;
@@ -1497,8 +1489,6 @@ qspi_reg_err:
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
kfree(qspi->dev_ids);
-qspi_resource_err:
- spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
@@ -1508,10 +1498,10 @@ int bcm_qspi_remove(struct platform_device *pdev)
{
struct bcm_qspi *qspi = platform_get_drvdata(pdev);
+ spi_unregister_master(qspi->master);
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
kfree(qspi->dev_ids);
- spi_unregister_master(qspi->master);
return 0;
}
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 7104cf17b848..197485f2c2b2 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1278,7 +1278,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
struct bcm2835_spi *bs;
int err;
- ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
+ ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
dma_get_cache_alignment()));
if (!ctlr)
return -ENOMEM;
@@ -1299,23 +1299,17 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs->ctlr = ctlr;
bs->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(bs->regs)) {
- err = PTR_ERR(bs->regs);
- goto out_controller_put;
- }
+ if (IS_ERR(bs->regs))
+ return PTR_ERR(bs->regs);
bs->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(bs->clk)) {
- err = dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
- "could not get clk\n");
- goto out_controller_put;
- }
+ if (IS_ERR(bs->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
+ "could not get clk\n");
bs->irq = platform_get_irq(pdev, 0);
- if (bs->irq <= 0) {
- err = bs->irq ? bs->irq : -ENODEV;
- goto out_controller_put;
- }
+ if (bs->irq <= 0)
+ return bs->irq ? bs->irq : -ENODEV;
clk_prepare_enable(bs->clk);
@@ -1349,8 +1343,6 @@ out_dma_release:
bcm2835_dma_release(ctlr, bs);
out_clk_disable:
clk_disable_unprepare(bs->clk);
-out_controller_put:
- spi_controller_put(ctlr);
return err;
}
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 03b034c15d2b..1a26865c42f8 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -494,7 +494,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
unsigned long clk_hz;
int err;
- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!master)
return -ENOMEM;
@@ -524,29 +524,25 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
/* the main area */
bs->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(bs->regs)) {
- err = PTR_ERR(bs->regs);
- goto out_master_put;
- }
+ if (IS_ERR(bs->regs))
+ return PTR_ERR(bs->regs);
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
- goto out_master_put;
+ return err;
}
bs->irq = platform_get_irq(pdev, 0);
- if (bs->irq <= 0) {
- err = bs->irq ? bs->irq : -ENODEV;
- goto out_master_put;
- }
+ if (bs->irq <= 0)
+ return bs->irq ? bs->irq : -ENODEV;
/* this also enables the HW block */
err = clk_prepare_enable(bs->clk);
if (err) {
dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
- goto out_master_put;
+ return err;
}
/* just checking if the clock returns a sane value */
@@ -581,8 +577,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
out_clk_disable:
clk_disable_unprepare(bs->clk);
-out_master_put:
- spi_master_put(master);
return err;
}
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 40938cf3806d..ba7d40c2922f 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1260,12 +1260,14 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain QSPI reset control */
rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
dev_err(dev, "Cannot get QSPI reset.\n");
goto probe_reset_failed;
}
rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
if (IS_ERR(rstc_ocp)) {
+ ret = PTR_ERR(rstc_ocp);
dev_err(dev, "Cannot get QSPI OCP reset.\n");
goto probe_reset_failed;
}
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 2e50cc0a9291..0b2236ade412 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -357,11 +357,11 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
+ dws->transfer_handler = dw_spi_transfer_handler;
+
imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI |
SPI_INT_RXFI;
spi_umask_intr(dws, imask);
-
- dws->transfer_handler = dw_spi_transfer_handler;
}
/*
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index 8a440c7078ef..3920cd3286d8 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -477,7 +477,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
if (rc)
- return rc;
+ goto error;
list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
struct fsi_spi_sequence seq;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 986b9793fd3c..a2886ee44e4c 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -938,9 +938,6 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
spi_controller_get_devdata(controller);
pm_runtime_disable(fsl_lpspi->dev);
-
- spi_master_put(controller);
-
return 0;
}
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 341f7cffeaac..1cb9329de945 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -679,7 +679,7 @@ static int npcm_fiu_probe(struct platform_device *pdev)
struct resource *res;
int id;
- ctrl = spi_alloc_master(dev, sizeof(*fiu));
+ ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
if (!ctrl)
return -ENOMEM;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0cab239d8e7f..05c75f890ace 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -812,18 +812,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
enable = !enable;
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
- /*
- * Honour the SPI_NO_CS flag and invert the enable line, as
- * active low is default for SPI. Execution paths that handle
- * polarity inversion in gpiolib (such as device tree) will
- * enforce active high using the SPI_CS_HIGH resulting in a
- * double inversion through the code above.
- */
if (!(spi->mode & SPI_NO_CS)) {
if (spi->cs_gpiod)
+ /* polarity handled by gpiolib */
gpiod_set_value_cansleep(spi->cs_gpiod,
- !enable);
+ enable1);
else
+ /*
+ * invert the enable line, as active low is
+ * default for SPI.
+ */
gpio_set_value_cansleep(spi->cs_gpio, !enable);
}
/* Some SPI masters need both GPIO CS & slave_select */
@@ -1992,15 +1990,6 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
spi->chip_select = value;
- /*
- * For descriptors associated with the device, polarity inversion is
- * handled in the gpiolib, so all gpio chip selects are "active high"
- * in the logical sense, the gpiolib will invert the line if need be.
- */
- if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
- ctlr->cs_gpiods[spi->chip_select])
- spi->mode |= SPI_CS_HIGH;
-
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
@@ -2453,6 +2442,49 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
+static void devm_spi_release_controller(struct device *dev, void *ctlr)
+{
+ spi_controller_put(*(struct spi_controller **)ctlr);
+}
+
+/**
+ * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
+ * @dev: physical device of SPI controller
+ * @size: how much zeroed driver-private data to allocate
+ * @slave: whether to allocate an SPI master (false) or SPI slave (true)
+ * Context: can sleep
+ *
+ * Allocate an SPI controller and automatically release a reference on it
+ * when @dev is unbound from its driver. Drivers are thus relieved from
+ * having to call spi_controller_put().
+ *
+ * The arguments to this function are identical to __spi_alloc_controller().
+ *
+ * Return: the SPI controller structure on success, else NULL.
+ */
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave)
+{
+ struct spi_controller **ptr, *ctlr;
+
+ ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ctlr = __spi_alloc_controller(dev, size, slave);
+ if (ctlr) {
+ *ptr = ctlr;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ctlr;
+}
+EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
+
#ifdef CONFIG_OF
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
@@ -2789,6 +2821,11 @@ int devm_spi_register_controller(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);
+static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
+{
+ return *(struct spi_controller **)res == ctlr;
+}
+
static int __unregister(struct device *dev, void *null)
{
spi_unregister_device(to_spi_device(dev));
@@ -2830,7 +2867,15 @@ void spi_unregister_controller(struct spi_controller *ctlr)
list_del(&ctlr->list);
mutex_unlock(&board_lock);
- device_unregister(&ctlr->dev);
+ device_del(&ctlr->dev);
+
+ /* Release the last reference on the controller if its driver
+ * has not yet been converted to devm_spi_alloc_master/slave().
+ */
+ if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
+ devm_spi_match_controller, ctlr))
+ put_device(&ctlr->dev);
+
/* free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
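
The spi core hunks above add __devm_spi_alloc_controller() (exposed to drivers as devm_spi_alloc_master()/devm_spi_alloc_slave()) so probe error paths no longer need an explicit spi_controller_put(), and teach spi_unregister_controller() to drop the final reference only for drivers still using the unmanaged allocator. A hedged probe/remove sketch of the converted pattern, mirroring the driver conversions earlier in this series (driver name and private struct are hypothetical):

  struct foo_spi { void __iomem *regs; };

  static int foo_spi_probe(struct platform_device *pdev)
  {
  	struct spi_controller *ctlr;
  	struct foo_spi *priv;
  	int ret;

  	/* Released automatically when the device is unbound: no put needed. */
  	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
  	if (!ctlr)
  		return -ENOMEM;

  	priv = spi_controller_get_devdata(ctlr);
  	priv->regs = devm_platform_ioremap_resource(pdev, 0);
  	if (IS_ERR(priv->regs))
  		return PTR_ERR(priv->regs);	/* plain return, no goto/put */

  	platform_set_drvdata(pdev, ctlr);

  	/* Unmanaged registration pairs with spi_unregister_master() in remove. */
  	return spi_register_master(ctlr);
  }

  static int foo_spi_remove(struct platform_device *pdev)
  {
  	struct spi_controller *ctlr = platform_get_drvdata(pdev);

  	spi_unregister_master(ctlr);	/* before tearing down clocks etc. */
  	return 0;
  }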
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index f961b353c22e..8831db383fad 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -653,16 +653,11 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
return 0;
}
-static int mt7621_pcie_request_resources(struct mt7621_pcie *pcie,
- struct list_head *res)
+static void mt7621_pcie_add_resources(struct mt7621_pcie *pcie,
+ struct list_head *res)
{
- struct device *dev = pcie->dev;
-
pci_add_resource_offset(res, &pcie->io, pcie->offset.io);
pci_add_resource_offset(res, &pcie->mem, pcie->offset.mem);
- pci_add_resource(res, &pcie->busn);
-
- return devm_request_pci_bus_resources(dev, res);
}
static int mt7621_pcie_register_host(struct pci_host_bridge *host,
@@ -738,11 +733,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
setup_cm_memory_region(pcie);
- err = mt7621_pcie_request_resources(pcie, &res);
- if (err) {
- dev_err(dev, "Error requesting resources\n");
- return err;
- }
+ mt7621_pcie_add_resources(pcie, &res);
err = mt7621_pcie_register_host(bridge, &res);
if (err) {
diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
index 54e8029e6b1a..0017376234e2 100644
--- a/drivers/staging/ralink-gdma/Kconfig
+++ b/drivers/staging/ralink-gdma/Kconfig
@@ -2,6 +2,7 @@
config DMA_RALINK
tristate "RALINK DMA support"
depends on RALINK && !SOC_RT288X
+ depends on DMADEVICES
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 79b55ec827a4..b2208e5f190a 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -20,6 +20,7 @@ static const struct sdio_device_id sdio_ids[] = {
{ SDIO_DEVICE(0x024c, 0x0525), },
{ SDIO_DEVICE(0x024c, 0x0623), },
{ SDIO_DEVICE(0x024c, 0x0626), },
+ { SDIO_DEVICE(0x024c, 0x0627), },
{ SDIO_DEVICE(0x024c, 0xb723), },
{ /* end: all zeroes */ },
};
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index f77e5eee6b80..518fac4864cf 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -483,8 +483,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
spin_lock_bh(&conn->cmd_lock);
- if (!list_empty(&cmd->i_conn_node) &&
- !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
+ if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
@@ -4083,12 +4082,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_lock_bh(&conn->cmd_lock);
list_splice_init(&conn->conn_cmd_list, &tmp_list);
- list_for_each_entry(cmd, &tmp_list, i_conn_node) {
+ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;
if (se_cmd->se_tfo != NULL) {
spin_lock_irq(&se_cmd->t_state_lock);
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ /*
+ * LIO's abort path owns the cleanup for this,
+ * so put it back on the list and let
+ * aborted_task handle it.
+ */
+ list_move_tail(&cmd->i_conn_node,
+ &conn->conn_cmd_list);
+ } else {
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ }
spin_unlock_irq(&se_cmd->t_state_lock);
}
}
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
index d7f798c3394b..337c8d82f74e 100644
--- a/drivers/tee/amdtee/amdtee_private.h
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -64,9 +64,13 @@ struct amdtee_session {
/**
* struct amdtee_context_data - AMD-TEE driver context data
* @sess_list: Keeps track of sessions opened in current TEE context
+ * @shm_list: Keeps track of buffers allocated and mapped in current TEE
+ * context
*/
struct amdtee_context_data {
struct list_head sess_list;
+ struct list_head shm_list;
+ struct mutex shm_mutex; /* synchronizes access to @shm_list */
};
struct amdtee_driver_data {
@@ -89,10 +93,6 @@ struct amdtee_shm_data {
u32 buf_id;
};
-struct amdtee_shm_context {
- struct list_head shmdata_list;
-};
-
#define LOWER_TWO_BYTE_MASK 0x0000FFFF
/**
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 27b4cd77d0db..8a6a8f30bb42 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -20,7 +20,6 @@
static struct amdtee_driver_data *drv_data;
static DEFINE_MUTEX(session_list_mutex);
-static struct amdtee_shm_context shmctx;
static void amdtee_get_version(struct tee_device *teedev,
struct tee_ioctl_version_data *vers)
@@ -42,7 +41,8 @@ static int amdtee_open(struct tee_context *ctx)
return -ENOMEM;
INIT_LIST_HEAD(&ctxdata->sess_list);
- INIT_LIST_HEAD(&shmctx.shmdata_list);
+ INIT_LIST_HEAD(&ctxdata->shm_list);
+ mutex_init(&ctxdata->shm_mutex);
ctx->data = ctxdata;
return 0;
@@ -86,6 +86,7 @@ static void amdtee_release(struct tee_context *ctx)
list_del(&sess->list_node);
release_session(sess);
}
+ mutex_destroy(&ctxdata->shm_mutex);
kfree(ctxdata);
ctx->data = NULL;
@@ -152,14 +153,17 @@ static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
u32 get_buffer_id(struct tee_shm *shm)
{
- u32 buf_id = 0;
+ struct amdtee_context_data *ctxdata = shm->ctx->data;
struct amdtee_shm_data *shmdata;
+ u32 buf_id = 0;
- list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node)
+ mutex_lock(&ctxdata->shm_mutex);
+ list_for_each_entry(shmdata, &ctxdata->shm_list, shm_node)
if (shmdata->kaddr == shm->kaddr) {
buf_id = shmdata->buf_id;
break;
}
+ mutex_unlock(&ctxdata->shm_mutex);
return buf_id;
}
@@ -333,8 +337,9 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
int amdtee_map_shmem(struct tee_shm *shm)
{
- struct shmem_desc shmem;
+ struct amdtee_context_data *ctxdata;
struct amdtee_shm_data *shmnode;
+ struct shmem_desc shmem;
int rc, count;
u32 buf_id;
@@ -362,7 +367,10 @@ int amdtee_map_shmem(struct tee_shm *shm)
shmnode->kaddr = shm->kaddr;
shmnode->buf_id = buf_id;
- list_add(&shmnode->shm_node, &shmctx.shmdata_list);
+ ctxdata = shm->ctx->data;
+ mutex_lock(&ctxdata->shm_mutex);
+ list_add(&shmnode->shm_node, &ctxdata->shm_list);
+ mutex_unlock(&ctxdata->shm_mutex);
pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
@@ -371,6 +379,7 @@ int amdtee_map_shmem(struct tee_shm *shm)
void amdtee_unmap_shmem(struct tee_shm *shm)
{
+ struct amdtee_context_data *ctxdata;
struct amdtee_shm_data *shmnode;
u32 buf_id;
@@ -381,12 +390,15 @@ void amdtee_unmap_shmem(struct tee_shm *shm)
/* Unmap the shared memory from TEE */
handle_unmap_shmem(buf_id);
- list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node)
+ ctxdata = shm->ctx->data;
+ mutex_lock(&ctxdata->shm_mutex);
+ list_for_each_entry(shmnode, &ctxdata->shm_list, shm_node)
if (buf_id == shmnode->buf_id) {
list_del(&shmnode->shm_node);
kfree(shmnode);
break;
}
+ mutex_unlock(&ctxdata->shm_mutex);
}
int amdtee_invoke_func(struct tee_context *ctx,
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 5e596168ba73..dcac99f327b0 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include <linux/reboot.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
@@ -864,6 +865,17 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
return bgp;
}
+/*
+ * List of SoCs on which the CPU PM notifier can cause errors on the DTEMP
+ * readout.
+ * Enabling the notifier on these machines results in erroneous, random values
+ * which could trigger an unexpected thermal shutdown.
+ */
+static const struct soc_device_attribute soc_no_cpu_notifier[] = {
+ { .machine = "OMAP4430" },
+ { /* sentinel */ },
+};
+
/*** Device driver call backs ***/
static
@@ -1020,7 +1032,8 @@ int ti_bandgap_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
bgp->nb.notifier_call = bandgap_omap_cpu_notifier;
- cpu_pm_register_notifier(&bgp->nb);
+ if (!soc_device_match(soc_no_cpu_notifier))
+ cpu_pm_register_notifier(&bgp->nb);
#endif
return 0;
@@ -1056,7 +1069,8 @@ int ti_bandgap_remove(struct platform_device *pdev)
struct ti_bandgap *bgp = platform_get_drvdata(pdev);
int i;
- cpu_pm_unregister_notifier(&bgp->nb);
+ if (!soc_device_match(soc_no_cpu_notifier))
+ cpu_pm_unregister_notifier(&bgp->nb);
/* Remove sensor interfaces */
for (i = 0; i < bgp->conf->sensor_count; i++) {
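
The ti-bandgap change keys the CPU PM notifier on a soc_device_attribute quirk table, so the notifier is simply never registered on SoCs where the DTEMP readout is unreliable. A hedged sketch of that quirk-table pattern (machine string and wrapper function are placeholders):

  #include <linux/sys_soc.h>
  #include <linux/cpu_pm.h>

  static const struct soc_device_attribute no_cpu_notifier[] = {
  	{ .machine = "SOMESOC" },	/* hypothetical quirky machine */
  	{ /* sentinel */ },
  };

  static int foo_register_pm_notifier(struct notifier_block *nb)
  {
  	/* Register the notifier only on SoCs without the quirk. */
  	if (!soc_device_match(no_cpu_notifier))
  		return cpu_pm_register_notifier(nb);
  	return 0;
  }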
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 0c80a79d7442..c2be7cf91399 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -789,8 +789,10 @@ static int ar933x_uart_probe(struct platform_device *pdev)
goto err_disable_clk;
up->gpios = mctrl_gpio_init(port, 0);
- if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS)
- return PTR_ERR(up->gpios);
+ if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS) {
+ ret = PTR_ERR(up->gpios);
+ goto err_disable_clk;
+ }
up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 1731d9728865..cacf7266a262 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -942,8 +942,14 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
struct imx_port *sport = dev_id;
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
irqreturn_t ret = IRQ_NONE;
+ unsigned long flags = 0;
- spin_lock(&sport->port.lock);
+ /*
+ * IRQs might not be disabled upon entering this interrupt handler,
+ * e.g. when interrupt handlers are forced to be threaded. To support
+ * this scenario as well, disable IRQs when acquiring the spinlock.
+ */
+ spin_lock_irqsave(&sport->port.lock, flags);
usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
@@ -1013,7 +1019,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
- spin_unlock(&sport->port.lock);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
return ret;
}
@@ -2002,16 +2008,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
unsigned int ucr1;
unsigned long flags = 0;
int locked = 1;
- int retval;
-
- retval = clk_enable(sport->clk_per);
- if (retval)
- return;
- retval = clk_enable(sport->clk_ipg);
- if (retval) {
- clk_disable(sport->clk_per);
- return;
- }
if (sport->port.sysrq)
locked = 0;
@@ -2047,9 +2043,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
if (locked)
spin_unlock_irqrestore(&sport->port.lock, flags);
-
- clk_disable(sport->clk_ipg);
- clk_disable(sport->clk_per);
}
/*
@@ -2150,15 +2143,14 @@ imx_uart_console_setup(struct console *co, char *options)
retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
- clk_disable(sport->clk_ipg);
if (retval) {
- clk_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_ipg);
goto error_console;
}
- retval = clk_prepare(sport->clk_per);
+ retval = clk_prepare_enable(sport->clk_per);
if (retval)
- clk_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_ipg);
error_console:
return retval;
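
The imx hunk above switches the interrupt handler to spin_lock_irqsave() because, with forced-threaded interrupts (threadirqs), the handler can run with interrupts enabled while the same lock is also taken from other contexts. A hedged generic sketch of that locking rule (hypothetical device structure):

  #include <linux/interrupt.h>
  #include <linux/spinlock.h>

  struct foo_port { spinlock_t lock; };

  static irqreturn_t foo_irq(int irq, void *dev_id)
  {
  	struct foo_port *port = dev_id;
  	unsigned long flags;

  	/*
  	 * Don't assume IRQs are off here: when handlers are force-threaded
  	 * this runs in task context, so save/disable IRQs around the lock.
  	 */
  	spin_lock_irqsave(&port->lock, flags);
  	/* ... read and acknowledge the hardware status ... */
  	spin_unlock_irqrestore(&port->lock, flags);

  	return IRQ_HANDLED;
  }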
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index d7d32b656102..358f6048dd3c 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -13,6 +13,7 @@ config VDPA_SIM
depends on RUNTIME_TESTING_MENU && HAS_DMA
select DMA_OPS
select VHOST_RING
+ select GENERIC_NET_UTILS
default n
help
vDPA networking device simulator which loop TX traffic back
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b22adf03f584..f22fce549862 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -52,7 +52,6 @@
#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
-#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
@@ -140,6 +139,7 @@ struct vhost_scsi_tpg {
struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi;
+ struct list_head tmf_queue;
};
struct vhost_scsi_tport {
@@ -189,6 +189,9 @@ struct vhost_scsi_virtqueue {
* Writers must also take dev mutex and flush under it.
*/
int inflight_idx;
+ struct vhost_scsi_cmd *scsi_cmds;
+ struct sbitmap scsi_tags;
+ int max_cmds;
};
struct vhost_scsi {
@@ -209,6 +212,20 @@ struct vhost_scsi {
int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
+struct vhost_scsi_tmf {
+ struct vhost_work vwork;
+ struct vhost_scsi_tpg *tpg;
+ struct vhost_scsi *vhost;
+ struct vhost_scsi_virtqueue *svq;
+ struct list_head queue_entry;
+
+ struct se_cmd se_cmd;
+ struct vhost_scsi_inflight *inflight;
+ struct iovec resp_iov;
+ int in_iovs;
+ int vq_desc;
+};
+
/*
* Context for processing request and control queue operations.
*/
@@ -320,11 +337,13 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
- struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
+ struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
int i;
if (tv_cmd->tvc_sgl_count) {
@@ -336,8 +355,36 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
}
- vhost_scsi_put_inflight(tv_cmd->inflight);
- target_free_tag(se_sess, se_cmd);
+ sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
+ vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
+{
+ struct vhost_scsi_tpg *tpg = tmf->tpg;
+ struct vhost_scsi_inflight *inflight = tmf->inflight;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd,
+ struct vhost_scsi_tmf, se_cmd);
+
+ vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
+ } else {
+ struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+ struct vhost_scsi_cmd, tvc_se_cmd);
+ struct vhost_scsi *vs = cmd->tvc_vhost;
+
+ llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+ vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+ }
}
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
@@ -362,34 +409,24 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
return 0;
}
-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
-{
- struct vhost_scsi *vs = cmd->tvc_vhost;
-
- llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
-
- vhost_work_queue(&vs->dev, &vs->vs_completion_work);
-}
-
static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
- struct vhost_scsi_cmd *cmd = container_of(se_cmd,
- struct vhost_scsi_cmd, tvc_se_cmd);
- vhost_scsi_complete_cmd(cmd);
+ transport_generic_free_cmd(se_cmd, 0);
return 0;
}
static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
- struct vhost_scsi_cmd *cmd = container_of(se_cmd,
- struct vhost_scsi_cmd, tvc_se_cmd);
- vhost_scsi_complete_cmd(cmd);
+ transport_generic_free_cmd(se_cmd, 0);
return 0;
}
static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
- return;
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
+ se_cmd);
+
+ transport_generic_free_cmd(&tmf->se_cmd, 0);
}
static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
@@ -429,15 +466,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
return evt;
}
-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
-{
- struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-
- /* TODO locking against target/backend threads? */
- transport_generic_free_cmd(se_cmd, 0);
-
-}
-
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd);
@@ -556,7 +584,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
- vhost_scsi_free_cmd(cmd);
+ vhost_scsi_release_cmd_res(se_cmd);
}
vq = -1;
@@ -566,31 +594,31 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
static struct vhost_scsi_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
u32 exp_data_len, int data_direction)
{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *cmd;
struct vhost_scsi_nexus *tv_nexus;
- struct se_session *se_sess;
struct scatterlist *sg, *prot_sg;
struct page **pages;
- int tag, cpu;
+ int tag;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Unable to locate active struct vhost_scsi_nexus\n");
return ERR_PTR(-EIO);
}
- se_sess = tv_nexus->tvn_se_sess;
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ tag = sbitmap_get(&svq->scsi_tags, 0, false);
if (tag < 0) {
pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM);
}
- cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
+ cmd = &svq->scsi_cmds[tag];
sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
@@ -599,7 +627,6 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
cmd->tvc_se_cmd.map_tag = tag;
- cmd->tvc_se_cmd.map_cpu = cpu;
cmd->tvc_tag = scsi_tag;
cmd->tvc_lun = lun;
cmd->tvc_task_attr = task_attr;
@@ -907,6 +934,11 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
return ret;
}
+static u16 vhost_buf_to_lun(u8 *lun_buf)
+{
+ return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
+}
+
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
@@ -1045,12 +1077,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
tag = vhost64_to_cpu(vq, v_req_pi.tag);
task_attr = v_req_pi.task_attr;
cdb = &v_req_pi.cdb[0];
- lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
+ lun = vhost_buf_to_lun(v_req_pi.lun);
} else {
tag = vhost64_to_cpu(vq, v_req.tag);
task_attr = v_req.task_attr;
cdb = &v_req.cdb[0];
- lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+ lun = vhost_buf_to_lun(v_req.lun);
}
/*
* Check that the received CDB size does not exceeded our
@@ -1065,11 +1097,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
goto err;
}
- cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
+ cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
exp_data_len + prot_bytes,
data_direction);
if (IS_ERR(cmd)) {
- vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
+ vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
PTR_ERR(cmd));
goto err;
}
@@ -1088,7 +1120,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
&prot_iter, exp_data_len,
&data_iter))) {
vq_err(vq, "Failed to map iov to sgl\n");
- vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
goto err;
}
}
@@ -1124,9 +1156,9 @@ out:
}
static void
-vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq,
- struct vhost_scsi_ctx *vc)
+vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ int in_iovs, int vq_desc, struct iovec *resp_iov,
+ int tmf_resp_code)
{
struct virtio_scsi_ctrl_tmf_resp rsp;
struct iov_iter iov_iter;
@@ -1134,17 +1166,87 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ rsp.response = tmf_resp_code;
- iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+ iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
if (likely(ret == sizeof(rsp)))
- vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}
+static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
+{
+ struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
+ vwork);
+ int resp_code;
+
+ if (tmf->se_cmd.se_tmr_req->response == TMR_FUNCTION_COMPLETE)
+ resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ else
+ resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+
+ vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+ tmf->vq_desc, &tmf->resp_iov, resp_code);
+ vhost_scsi_release_tmf_res(tmf);
+}
+
+static void
+vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
+ struct vhost_virtqueue *vq,
+ struct virtio_scsi_ctrl_tmf_req *vtmf,
+ struct vhost_scsi_ctx *vc)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_tmf *tmf;
+
+ if (vhost32_to_cpu(vq, vtmf->subtype) !=
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
+ goto send_reject;
+
+ if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
+ pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
+ goto send_reject;
+ }
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ if (list_empty(&tpg->tmf_queue)) {
+ pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ goto send_reject;
+ }
+
+ tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+ queue_entry);
+ list_del_init(&tmf->queue_entry);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ tmf->tpg = tpg;
+ tmf->vhost = vs;
+ tmf->svq = svq;
+ tmf->resp_iov = vq->iov[vc->out];
+ tmf->vq_desc = vc->head;
+ tmf->in_iovs = vc->in;
+ tmf->inflight = vhost_scsi_get_inflight(vq);
+
+ if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
+ vhost_buf_to_lun(vtmf->lun), NULL,
+ TMR_LUN_RESET, GFP_KERNEL, 0,
+ TARGET_SCF_ACK_KREF) < 0) {
+ vhost_scsi_release_tmf_res(tmf);
+ goto send_reject;
+ }
+
+ return;
+
+send_reject:
+ vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
+ VIRTIO_SCSI_S_FUNCTION_REJECTED);
+}
+
static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
@@ -1170,6 +1272,7 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
+ struct vhost_scsi_tpg *tpg;
union {
__virtio32 type;
struct virtio_scsi_ctrl_an_req an;
@@ -1251,12 +1354,12 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vc.req += typ_size;
vc.req_size -= typ_size;
- ret = vhost_scsi_get_req(vq, &vc, NULL);
+ ret = vhost_scsi_get_req(vq, &vc, &tpg);
if (ret)
goto err;
if (v_req.type == VIRTIO_SCSI_T_TMF)
- vhost_scsi_send_tmf_reject(vs, vq, &vc);
+ vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
else
vhost_scsi_send_an_resp(vs, vq, &vc);
err:
@@ -1373,6 +1476,83 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
wait_for_completion(&old_inflight[i]->comp);
}
+static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (!svq->scsi_cmds)
+ return;
+
+ for (i = 0; i < svq->max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ kfree(tv_cmd->tvc_sgl);
+ kfree(tv_cmd->tvc_prot_sgl);
+ kfree(tv_cmd->tvc_upages);
+ }
+
+ sbitmap_free(&svq->scsi_tags);
+ kfree(svq->scsi_cmds);
+ svq->scsi_cmds = NULL;
+}
+
+static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (svq->scsi_cmds)
+ return 0;
+
+ if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
+ NUMA_NO_NODE))
+ return -ENOMEM;
+ svq->max_cmds = max_cmds;
+
+ svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
+ if (!svq->scsi_cmds) {
+ sbitmap_free(&svq->scsi_tags);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_sgl) {
+ pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_upages) {
+ pr_err("Unable to allocate tv_cmd->tvc_upages\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_prot_sgl) {
+ pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+ goto out;
+ }
+ }
+ return 0;
+out:
+ vhost_scsi_destroy_vq_cmds(vq);
+ return -ENOMEM;
+}
+
/*
* Called from vhost_scsi_ioctl() context to walk the list of available
* vhost_scsi_tpg with an active struct vhost_scsi_nexus
@@ -1427,10 +1607,9 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
- kfree(vs_tpg);
mutex_unlock(&tpg->tv_tpg_mutex);
ret = -EEXIST;
- goto out;
+ goto undepend;
}
/*
* In order to ensure individual vhost-scsi configfs
@@ -1442,9 +1621,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
ret = target_depend_item(&se_tpg->tpg_group.cg_item);
if (ret) {
pr_warn("target_depend_item() failed: %d\n", ret);
- kfree(vs_tpg);
mutex_unlock(&tpg->tv_tpg_mutex);
- goto out;
+ goto undepend;
}
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
@@ -1457,6 +1635,16 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
+
+ for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ vq = &vs->vqs[i].vq;
+ if (!vhost_vq_is_setup(vq))
+ continue;
+
+ if (vhost_scsi_setup_vq_cmds(vq, vq->num))
+ goto destroy_vq_cmds;
+ }
+
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
@@ -1476,7 +1664,22 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
vhost_scsi_flush(vs);
kfree(vs->vs_tpg);
vs->vs_tpg = vs_tpg;
+ goto out;
+destroy_vq_cmds:
+ for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
+ if (!vhost_vq_get_backend(&vs->vqs[i].vq))
+ vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
+ }
+undepend:
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ tpg = vs_tpg[i];
+ if (tpg) {
+ tpg->tv_tpg_vhost_count--;
+ target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+ }
+ }
+ kfree(vs_tpg);
out:
mutex_unlock(&vs->dev.mutex);
mutex_unlock(&vhost_scsi_mutex);
@@ -1549,6 +1752,12 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
+ /*
+ * Make sure cmds are not running before tearing them
+ * down.
+ */
+ vhost_scsi_flush(vs);
+ vhost_scsi_destroy_vq_cmds(vq);
}
}
/*
@@ -1811,11 +2020,19 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tmf *tmf;
+
+ tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+ if (!tmf)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&tmf->queue_entry);
+ vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count++;
+ list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotplug(tpg, lun);
@@ -1830,11 +2047,16 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tmf *tmf;
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count--;
+ tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+ queue_entry);
+ list_del(&tmf->queue_entry);
+ kfree(tmf);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotunplug(tpg, lun);
@@ -1842,23 +2064,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
mutex_unlock(&vhost_scsi_mutex);
}
-static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
-{
- struct vhost_scsi_cmd *tv_cmd;
- unsigned int i;
-
- if (!se_sess->sess_cmd_map)
- return;
-
- for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
- tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
- kfree(tv_cmd->tvc_sgl);
- kfree(tv_cmd->tvc_prot_sgl);
- kfree(tv_cmd->tvc_upages);
- }
-}
-
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
struct config_item *item, const char *page, size_t count)
{
@@ -1898,45 +2103,6 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
NULL,
};
-static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
- struct se_session *se_sess, void *p)
-{
- struct vhost_scsi_cmd *tv_cmd;
- unsigned int i;
-
- for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
- tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
- tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!tv_cmd->tvc_sgl) {
- pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
- goto out;
- }
-
- tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!tv_cmd->tvc_upages) {
- pr_err("Unable to allocate tv_cmd->tvc_upages\n");
- goto out;
- }
-
- tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!tv_cmd->tvc_prot_sgl) {
- pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
- goto out;
- }
- }
- return 0;
-out:
- vhost_scsi_free_cmd_map_res(se_sess);
- return -ENOMEM;
-}
-
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
@@ -1960,12 +2126,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
- tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
- VHOST_SCSI_DEFAULT_TAGS,
- sizeof(struct vhost_scsi_cmd),
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
- (unsigned char *)name, tv_nexus,
- vhost_scsi_nexus_cb);
+ (unsigned char *)name, tv_nexus, NULL);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
@@ -2015,7 +2178,6 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
- vhost_scsi_free_cmd_map_res(se_sess);
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
@@ -2155,6 +2317,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
+ INIT_LIST_HEAD(&tpg->tmf_queue);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
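
The vhost-scsi rework above moves command descriptors out of the LIO session tag pool and into a per-virtqueue array indexed by an sbitmap, set up when the endpoint is configured and torn down after flushing the queue. A hedged sketch of that per-queue tag scheme (simplified, with made-up names; the sbitmap calls follow the 5.9/5.10-era API used in the diff):

  #include <linux/sbitmap.h>
  #include <linux/slab.h>
  #include <linux/numa.h>

  struct foo_cmd { int tag; /* ... per-command buffers ... */ };

  struct foo_queue {
  	struct sbitmap	tags;
  	struct foo_cmd	*cmds;
  	int		max_cmds;
  };

  static int foo_queue_setup(struct foo_queue *q, int max_cmds)
  {
  	if (sbitmap_init_node(&q->tags, max_cmds, -1, GFP_KERNEL,
  			      NUMA_NO_NODE))
  		return -ENOMEM;

  	q->cmds = kcalloc(max_cmds, sizeof(*q->cmds), GFP_KERNEL);
  	if (!q->cmds) {
  		sbitmap_free(&q->tags);
  		return -ENOMEM;
  	}
  	q->max_cmds = max_cmds;
  	return 0;
  }

  static struct foo_cmd *foo_get_cmd(struct foo_queue *q)
  {
  	int tag = sbitmap_get(&q->tags, 0, false);	/* old three-argument API */

  	if (tag < 0)
  		return NULL;			/* queue full */
  	q->cmds[tag].tag = tag;
  	return &q->cmds[tag];
  }

  static void foo_put_cmd(struct foo_queue *q, struct foo_cmd *cmd)
  {
  	sbitmap_clear_bit(&q->tags, cmd->tag);
  }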
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5c835a292783..a262e12c6dc2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -304,6 +304,12 @@ static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
+{
+ return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index e016cd3fa02f..b063324c7669 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -190,6 +190,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 5bc86f481a78..c8b0ae676809 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1093,7 +1093,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto err1;
}
- fb_virt = ioremap(par->mem->start, screen_fb_size);
+ /*
+ * Map the VRAM cacheable for performance. This is also required for
+ * VM Connect to display properly for ARM64 Linux VM, as the host also
+ * maps the VRAM cacheable.
+ */
+ fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
if (!fb_virt)
goto err2;