author     Jakub Kicinski <kuba@kernel.org>   2023-05-11 09:06:26 -0700
committer  Jakub Kicinski <kuba@kernel.org>   2023-05-11 09:06:55 -0700
commit     bc88ba0cad64a4aa85f9deca79c6f3addcd21795 (patch)
tree       66db8412b3f65bf19645611afee4b1dbe14b74a8 /drivers
parent     285b2a46953cecea207c53f7c6a7a76c9bbab303 (diff)
parent     6e27831b91a0bc572902eb065b374991c1ef452a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes. No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/brd.c | 1
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 2
-rw-r--r--  drivers/block/nbd.c | 7
-rw-r--r--  drivers/block/null_blk/main.c | 1
-rw-r--r--  drivers/block/ublk_drv.c | 71
-rw-r--r--  drivers/block/zram/zram_drv.c | 1
-rw-r--r--  drivers/clk/clk-sp7021.c | 2
-rw-r--r--  drivers/clk/imx/clk-composite-8m.c | 7
-rw-r--r--  drivers/clk/starfive/Kconfig | 5
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
-rw-r--r--  drivers/mailbox/Kconfig | 2
-rw-r--r--  drivers/mailbox/bcm-pdc-mailbox.c | 2
-rw-r--r--  drivers/mailbox/hi6220-mailbox.c | 5
-rw-r--r--  drivers/mailbox/mailbox-test.c | 8
-rw-r--r--  drivers/mailbox/mailbox.c | 96
-rw-r--r--  drivers/mailbox/omap-mailbox.c | 25
-rw-r--r--  drivers/mailbox/pcc.c | 84
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c | 11
-rw-r--r--  drivers/mailbox/rockchip-mailbox.c | 2
-rw-r--r--  drivers/md/bcache/super.c | 1
-rw-r--r--  drivers/md/md-bitmap.c | 6
-rw-r--r--  drivers/md/raid5.c | 45
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c | 3
-rw-r--r--  drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c | 6
-rw-r--r--  drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c | 41
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/rcar-dma.c | 21
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 7
-rw-r--r--  drivers/net/bonding/bond_options.c | 8
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 13
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/vsc7514_regs.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 5
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r--  drivers/net/mdio/mdio-mvusb.c | 11
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 2
-rw-r--r--  drivers/net/phy/bcm-phy-lib.h | 5
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 2
-rw-r--r--  drivers/net/tap.c | 4
-rw-r--r--  drivers/nvme/host/ioctl.c | 2
-rw-r--r--  drivers/platform/mellanox/mlxbf-tmfifo.c | 11
-rw-r--r--  drivers/platform/x86/hp/hp-wmi.c | 1
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c | 6
-rw-r--r--  drivers/platform/x86/intel_scu_pcidrv.c | 1
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 24
-rw-r--r--  drivers/platform/x86/touchscreen_dmi.c | 41
-rw-r--r--  drivers/scsi/Kconfig | 3
-rw-r--r--  drivers/scsi/ipr.c | 788
-rw-r--r--  drivers/scsi/ipr.h | 64
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 7
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 3
-rw-r--r--  drivers/scsi/scsi_debug.c | 22
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 6
53 files changed, 455 insertions, 1065 deletions
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 34177f1bd97d..bcad9b926b0c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -404,7 +404,6 @@ static int brd_alloc(int i)
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 719a7260c22b..8c2bc47de473 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1283,7 +1283,7 @@ static void one_flush_endio(struct bio *bio)
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
- REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
+ REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
if (!octx) {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index d445fd0934bd..9c35c958f2c8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -609,7 +609,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.len = htonl(size);
}
handle = nbd_cmd_handle(cmd);
- memcpy(request.handle, &handle, sizeof(handle));
+ request.cookie = cpu_to_be64(handle);
trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
@@ -621,7 +621,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
trace_nbd_header_sent(req, handle);
if (result < 0) {
if (was_interrupted(result)) {
- /* If we havne't sent anything we can just return BUSY,
+ /* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
* sure we only allow this req to be sent until we are
* completely done.
@@ -735,7 +735,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
u32 tag;
int ret = 0;
- memcpy(&handle, reply->handle, sizeof(handle));
+ handle = be64_to_cpu(reply->cookie);
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -1805,7 +1805,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index b195b8b9fe32..b3fedafe301e 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -2144,7 +2144,6 @@ static int null_add_dev(struct nullb_device *dev)
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6b8aa0d030f7..c7331f519750 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -129,6 +129,7 @@ struct ublk_queue {
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
+ bool timeout;
unsigned short nr_io_ready; /* how many ios setup */
struct ublk_device *dev;
struct ublk_io ios[];
@@ -898,6 +899,22 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
}
}
+static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+{
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+
+ if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
+ if (!ubq->timeout) {
+ send_sig(SIGKILL, ubq->ubq_daemon, 0);
+ ubq->timeout = true;
+ }
+
+ return BLK_EH_DONE;
+ }
+
+ return BLK_EH_RESET_TIMER;
+}
+
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -957,6 +974,7 @@ static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
.init_hctx = ublk_init_hctx,
.init_request = ublk_init_rq,
+ .timeout = ublk_timeout,
};
static int ublk_ch_open(struct inode *inode, struct file *filp)
@@ -1017,7 +1035,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
}
static void ublk_commit_completion(struct ublk_device *ub,
- struct ublksrv_io_cmd *ub_cmd)
+ const struct ublksrv_io_cmd *ub_cmd)
{
u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
struct ublk_queue *ubq = ublk_get_queue(ub, qid);
@@ -1274,7 +1292,7 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
- struct ublksrv_io_cmd *ub_cmd)
+ const struct ublksrv_io_cmd *ub_cmd)
{
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
@@ -1381,17 +1399,17 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
- struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
- struct ublksrv_io_cmd ub_cmd;
-
/*
* Not necessary for async retry, but let's keep it simple and always
* copy the values to avoid any potential reuse.
*/
- ub_cmd.q_id = READ_ONCE(ub_src->q_id);
- ub_cmd.tag = READ_ONCE(ub_src->tag);
- ub_cmd.result = READ_ONCE(ub_src->result);
- ub_cmd.addr = READ_ONCE(ub_src->addr);
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
+ const struct ublksrv_io_cmd ub_cmd = {
+ .q_id = READ_ONCE(ub_src->q_id),
+ .tag = READ_ONCE(ub_src->tag),
+ .result = READ_ONCE(ub_src->result),
+ .addr = READ_ONCE(ub_src->addr)
+ };
return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
@@ -1601,7 +1619,7 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
struct gendisk *disk;
int ret = -EINVAL;
@@ -1664,7 +1682,7 @@ out_unlock:
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
cpumask_var_t cpumask;
unsigned long queue;
@@ -1715,7 +1733,7 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
@@ -1737,6 +1755,18 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
+ /*
+ * unprivileged device can't be trusted, but RECOVERY and
+ * RECOVERY_REISSUE still may hang error handling, so can't
+ * support recovery features for unprivileged ublk now
+ *
+ * TODO: provide forward progress for RECOVERY handler, so that
+ * unprivileged device can benefit from it
+ */
+ if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
+ UBLK_F_USER_RECOVERY);
+
/* the created device is always owned by current user */
ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
@@ -1880,7 +1910,7 @@ static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
__func__, cmd->cmd_op, header->dev_id, header->queue_id,
@@ -1899,7 +1929,7 @@ static int ublk_ctrl_stop_dev(struct ublk_device *ub)
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@@ -1930,7 +1960,7 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
static int ublk_ctrl_get_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret;
@@ -1961,7 +1991,7 @@ static int ublk_ctrl_get_params(struct ublk_device *ub,
static int ublk_ctrl_set_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret = -EFAULT;
@@ -2007,6 +2037,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
put_task_struct(ubq->ubq_daemon);
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
ubq->ubq_daemon = NULL;
+ ubq->timeout = false;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -2021,7 +2052,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
int i;
@@ -2063,7 +2094,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
@@ -2130,7 +2161,7 @@ exit:
static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
void __user *argp = (void __user *)(unsigned long)header->addr;
char *dev_path = NULL;
@@ -2209,7 +2240,7 @@ exit:
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
struct ublk_device *ub = NULL;
u32 cmd_op = cmd->cmd_op;
int ret = -EINVAL;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a84c4268257a..f6d90f1ba5cf 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2215,7 +2215,6 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
/*
* To ensure that we always get PAGE_SIZE aligned
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 8fec14120105..11d22043ddd7 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -41,7 +41,7 @@ enum {
/* HIWORD_MASK FIELD_PREP */
#define HWM_FIELD_PREP(mask, value) \
({ \
- u32 _m = mask; \
+ u64 _m = mask; \
(_m << 16) | FIELD_PREP(_m, value); \
})
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 6883a8199b6c..cbf0d7955a00 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -119,17 +119,10 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
return ret;
}
-static int imx8m_clk_divider_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- return clk_divider_ops.determine_rate(hw, req);
-}
-
static const struct clk_ops imx8m_clk_composite_divider_ops = {
.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
.round_rate = imx8m_clk_composite_divider_round_rate,
.set_rate = imx8m_clk_composite_divider_set_rate,
- .determine_rate = imx8m_clk_divider_determine_rate,
};
static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)
diff --git a/drivers/clk/starfive/Kconfig b/drivers/clk/starfive/Kconfig
index 71c1148ee5f6..5d2333106f13 100644
--- a/drivers/clk/starfive/Kconfig
+++ b/drivers/clk/starfive/Kconfig
@@ -26,7 +26,7 @@ config CLK_STARFIVE_JH7110_SYS
depends on ARCH_STARFIVE || COMPILE_TEST
select AUXILIARY_BUS
select CLK_STARFIVE_JH71X0
- select RESET_STARFIVE_JH7110
+ select RESET_STARFIVE_JH7110 if RESET_CONTROLLER
default ARCH_STARFIVE
help
Say yes here to support the system clock controller on the
@@ -35,9 +35,6 @@ config CLK_STARFIVE_JH7110_SYS
config CLK_STARFIVE_JH7110_AON
tristate "StarFive JH7110 always-on clock support"
depends on CLK_STARFIVE_JH7110_SYS
- select AUXILIARY_BUS
- select CLK_STARFIVE_JH71X0
- select RESET_STARFIVE_JH7110
default m if ARCH_STARFIVE
help
Say yes here to support the always-on clock controller on the
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 83c6dfad77e1..16966cc94e24 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -151,7 +151,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
}
rctx->p_iv[i] = a;
/* we need to setup all others IVs only in the decrypt way */
- if (rctx->op_dir & SS_ENCRYPTION)
+ if (rctx->op_dir == SS_ENCRYPTION)
return 0;
todo = min(len, sg_dma_len(sg));
len -= todo;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index af6b0f5b491d..bc2e265cb02d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -176,7 +176,7 @@ config MAILBOX_TEST
config POLARFIRE_SOC_MAILBOX
tristate "PolarFire SoC (MPFS) Mailbox"
depends on HAS_IOMEM
- depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
help
This driver adds support for the PolarFire SoC (MPFS) mailbox controller.
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 8d3a4c1fe761..8c95e3ce295f 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -1635,7 +1635,7 @@ static struct platform_driver pdc_mbox_driver = {
.remove = pdc_remove,
.driver = {
.name = "brcm-iproc-pdc-mbox",
- .of_match_table = of_match_ptr(pdc_mbox_of_match),
+ .of_match_table = pdc_mbox_of_match,
},
};
module_platform_driver(pdc_mbox_driver);
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index fca61f5312d9..1c73c63598f5 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -325,10 +325,7 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
/* use interrupt for tx's ack */
- if (of_find_property(node, "hi6220,mbox-tx-noirq", NULL))
- mbox->tx_irq_mode = false;
- else
- mbox->tx_irq_mode = true;
+ mbox->tx_irq_mode = !of_property_read_bool(node, "hi6220,mbox-tx-noirq");
if (mbox->tx_irq_mode)
mbox->controller.txdone_irq = true;
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 4555d678fadd..c4a705c30331 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -12,10 +12,12 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
@@ -38,6 +40,7 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
+ struct mutex mutex;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
struct dentry *root_debugfs_dir;
@@ -110,6 +113,8 @@ static ssize_t mbox_test_message_write(struct file *filp,
return -EINVAL;
}
+ mutex_lock(&tdev->mutex);
+
tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!tdev->message)
return -ENOMEM;
@@ -144,6 +149,8 @@ out:
kfree(tdev->message);
tdev->signal = NULL;
+ mutex_unlock(&tdev->mutex);
+
return ret < 0 ? ret : count;
}
@@ -392,6 +399,7 @@ static int mbox_test_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
spin_lock_init(&tdev->lock);
+ mutex_init(&tdev->mutex);
if (tdev->rx_channel) {
tdev->rx_buffer = devm_kzalloc(&pdev->dev,
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 4229b9b5da98..adf36c05fa43 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -317,6 +317,71 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
}
EXPORT_SYMBOL_GPL(mbox_flush);
+static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+{
+ struct device *dev = cl->dev;
+ unsigned long flags;
+ int ret;
+
+ if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
+ dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (chan->mbox->ops->startup) {
+ ret = chan->mbox->ops->startup(chan);
+
+ if (ret) {
+ dev_err(dev, "Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mbox_bind_client - Request a mailbox channel.
+ * @chan: The mailbox channel to bind the client to.
+ * @cl: Identity of the client requesting the channel.
+ *
+ * The Client specifies its requirements and capabilities while asking for
+ * a mailbox channel. It can't be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel.
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ * The framework holds reference to the client, so the mbox_client
+ * structure shouldn't be modified until the mbox_free_channel returns.
+ *
+ * Return: 0 if the channel was assigned to the client successfully.
+ * <0 for request failure.
+ */
+int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+{
+ int ret;
+
+ mutex_lock(&con_mutex);
+ ret = __mbox_bind_client(chan, cl);
+ mutex_unlock(&con_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mbox_bind_client);
+
/**
* mbox_request_channel - Request a mailbox channel.
* @cl: Identity of the client requesting the channel.
@@ -340,7 +405,6 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
- unsigned long flags;
int ret;
if (!dev || !dev->of_node) {
@@ -372,33 +436,9 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
return chan;
}
- if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
- dev_dbg(dev, "%s: mailbox not free\n", __func__);
- mutex_unlock(&con_mutex);
- return ERR_PTR(-EBUSY);
- }
-
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
-
- spin_unlock_irqrestore(&chan->lock, flags);
-
- if (chan->mbox->ops->startup) {
- ret = chan->mbox->ops->startup(chan);
-
- if (ret) {
- dev_err(dev, "Unable to startup the chan (%d)\n", ret);
- mbox_free_channel(chan);
- chan = ERR_PTR(ret);
- }
- }
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
+ chan = ERR_PTR(ret);
mutex_unlock(&con_mutex);
return chan;
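
The kerneldoc for mbox_bind_client() above describes the new export: a caller that already holds a struct mbox_chan can attach a client to it directly, instead of going through the DT lookup in mbox_request_channel() (the omap and pcc conversions below do exactly this). A minimal client-side sketch under that assumption — the my_* names and the way the channel pointer is obtained are illustrative, not taken from the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/slab.h>

/* Hypothetical rx handler; just logs the message pointer. */
static void my_rx_callback(struct mbox_client *cl, void *msg)
{
	dev_dbg(cl->dev, "rx message %p\n", msg);
}

/* Bind a client to a channel the caller already owns (e.g. taken from its
 * controller's private data). */
static struct mbox_chan *my_attach_client(struct device *dev,
					  struct mbox_chan *chan)
{
	struct mbox_client *cl;
	int ret;

	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	cl->dev = dev;
	cl->rx_callback = my_rx_callback;
	cl->tx_block = true;
	cl->tx_tout = 500;	/* ms */

	/* Exclusive ownership: returns -EBUSY if another client holds it. */
	ret = mbox_bind_client(chan, cl);
	if (ret)
		return ERR_PTR(ret);

	return chan;
}

The channel is released with mbox_free_channel(); note that __mbox_bind_client() above already calls it itself when ->startup() fails.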
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 098c82d87137..fa2ce3246b70 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -417,8 +417,6 @@ struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
struct device *dev = cl->dev;
struct omap_mbox *mbox = NULL;
struct omap_mbox_device *mdev;
- struct mbox_chan *chan;
- unsigned long flags;
int ret;
if (!dev)
@@ -441,23 +439,11 @@ struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
if (!mbox || !mbox->chan)
return ERR_PTR(-ENOENT);
- chan = mbox->chan;
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
- spin_unlock_irqrestore(&chan->lock, flags);
-
- ret = chan->mbox->ops->startup(chan);
- if (ret) {
- pr_err("Unable to startup the chan (%d)\n", ret);
- mbox_free_channel(chan);
- chan = ERR_PTR(ret);
- }
+ ret = mbox_bind_client(mbox->chan, cl);
+ if (ret)
+ return ERR_PTR(ret);
- return chan;
+ return mbox->chan;
}
EXPORT_SYMBOL(omap_mbox_request_channel);
@@ -763,8 +749,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
finfo->name = child->name;
- if (of_find_property(child, "ti,mbox-send-noirq", NULL))
- finfo->send_no_irq = true;
+ finfo->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 105d46c9801b..a44d4b3e5beb 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -282,8 +282,7 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan;
- struct device *dev;
- unsigned long flags;
+ int rc;
if (subspace_id < 0 || subspace_id >= pcc_chan_count)
return ERR_PTR(-ENOENT);
@@ -294,32 +293,10 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pr_err("Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
- dev = chan->mbox->dev;
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
-
- spin_unlock_irqrestore(&chan->lock, flags);
-
- if (pchan->plat_irq > 0) {
- int rc;
-
- rc = devm_request_irq(dev, pchan->plat_irq, pcc_mbox_irq, 0,
- MBOX_IRQ_NAME, chan);
- if (unlikely(rc)) {
- dev_err(dev, "failed to register PCC interrupt %d\n",
- pchan->plat_irq);
- pcc_mbox_free_channel(&pchan->chan);
- return ERR_PTR(rc);
- }
- }
+ rc = mbox_bind_client(chan, cl);
+ if (rc)
+ return ERR_PTR(rc);
return &pchan->chan;
}
@@ -333,23 +310,12 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
*/
void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
- struct pcc_chan_info *pchan_info = to_pcc_chan_info(pchan);
struct mbox_chan *chan = pchan->mchan;
- unsigned long flags;
if (!chan || !chan->cl)
return;
- if (pchan_info->plat_irq > 0)
- devm_free_irq(chan->mbox->dev, pchan_info->plat_irq, chan);
-
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
-
- spin_unlock_irqrestore(&chan->lock, flags);
+ mbox_free_channel(chan);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
@@ -377,8 +343,48 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
return pcc_chan_reg_read_modify_write(&pchan->db);
}
+/**
+ * pcc_startup - Called from Mailbox Controller code. Used here
+ * to request the interrupt.
+ * @chan: Pointer to Mailbox channel to startup.
+ *
+ * Return: Err if something failed else 0 for success.
+ */
+static int pcc_startup(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ int rc;
+
+ if (pchan->plat_irq > 0) {
+ rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, 0,
+ MBOX_IRQ_NAME, chan);
+ if (unlikely(rc)) {
+ dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
+ pchan->plat_irq);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pcc_shutdown - Called from Mailbox Controller code. Used here
+ * to free the interrupt.
+ * @chan: Pointer to Mailbox channel to shutdown.
+ */
+static void pcc_shutdown(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+
+ if (pchan->plat_irq > 0)
+ devm_free_irq(chan->mbox->dev, pchan->plat_irq, chan);
+}
+
static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
+ .startup = pcc_startup,
+ .shutdown = pcc_shutdown,
};
/**
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 6bbf87c6d60b..002a135ee868 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -141,9 +141,7 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
- { .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
@@ -153,15 +151,18 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
- { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
+ /* Do not add any more entries using existing driver data */
+ { .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
index 28d7bfee2641..116286ecc5a0 100644
--- a/drivers/mailbox/rockchip-mailbox.c
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -248,7 +248,7 @@ static struct platform_driver rockchip_mbox_driver = {
.probe = rockchip_mbox_probe,
.driver = {
.name = "rockchip-mailbox",
- .of_match_table = of_match_ptr(rockchip_mbox_of_match),
+ .of_match_table = rockchip_mbox_of_match,
},
};
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index ba3909bb6bea..7e9d19fd21dd 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -971,7 +971,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
blk_queue_write_cache(q, true, true);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 920bb68156d2..bc8d7565171d 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -219,7 +219,7 @@ static unsigned int optimal_io_size(struct block_device *bdev,
}
static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size,
- sector_t start, sector_t boundary)
+ loff_t start, loff_t boundary)
{
if (io_size != opt_size &&
start + opt_size / SECTOR_SIZE <= boundary)
@@ -237,8 +237,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
- sector_t offset = mddev->bitmap_info.offset;
- sector_t ps, sboff, doff;
+ loff_t sboff, offset = mddev->bitmap_info.offset;
+ sector_t ps, doff;
unsigned int size = PAGE_SIZE;
unsigned int opt_size = PAGE_SIZE;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 812a12e3e41a..4739ed891e75 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6079,6 +6079,38 @@ out_release:
return ret;
}
+/*
+ * If the bio covers multiple data disks, find sector within the bio that has
+ * the lowest chunk offset in the first chunk.
+ */
+static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
+ struct bio *bi)
+{
+ int sectors_per_chunk = conf->chunk_sectors;
+ int raid_disks = conf->raid_disks;
+ int dd_idx;
+ struct stripe_head sh;
+ unsigned int chunk_offset;
+ sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
+ sector_t sector;
+
+ /* We pass in fake stripe_head to get back parity disk numbers */
+ sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
+ chunk_offset = sector_div(sector, sectors_per_chunk);
+ if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
+ return r_sector;
+ /*
+ * Bio crosses to the next data disk. Check whether it's in the same
+ * chunk.
+ */
+ dd_idx++;
+ while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
+ dd_idx++;
+ if (dd_idx >= raid_disks)
+ return r_sector;
+ return r_sector + sectors_per_chunk - chunk_offset;
+}
+
static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -6150,6 +6182,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
}
md_account_bio(mddev, &bi);
+ /*
+ * Lets start with the stripe with the lowest chunk offset in the first
+ * chunk. That has the best chances of creating IOs adjacent to
+ * previous IOs in case of sequential IO and thus creates the most
+ * sequential IO pattern. We don't bother with the optimization when
+ * reshaping as the performance benefit is not worth the complexity.
+ */
+ if (likely(conf->reshape_progress == MaxSector))
+ logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+ s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
+
add_wait_queue(&conf->wait_for_overlap, &wait);
while (1) {
res = make_stripe_request(mddev, conf, &ctx, logical_sector,
@@ -6178,7 +6221,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
continue;
}
- s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
+ s = find_next_bit_wrap(ctx.sectors_to_do, stripe_cnt, s);
if (s == stripe_cnt)
break;
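
raid5_bio_lowest_chunk_sector() above relies on raid5_compute_sector() for the real (rotated) disk layout, but the heart of the optimization is the chunk-offset arithmetic described in its comment. A standalone sketch of just that arithmetic, with parity rotation and stripe rounding left out and all names hypothetical:

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified model: among the sectors covered by a bio of 'len' sectors
 * starting at 'start', return the one with the lowest offset inside its
 * chunk.  If the bio spills into the next chunk, that chunk's first sector
 * (offset 0) wins; otherwise the bio's own start sector is already lowest.
 */
static uint64_t lowest_chunk_sector(uint64_t start, uint32_t len,
				    uint32_t sectors_per_chunk)
{
	uint32_t chunk_offset = start % sectors_per_chunk;

	if (sectors_per_chunk - chunk_offset >= len)
		return start;

	return start + sectors_per_chunk - chunk_offset;
}

int main(void)
{
	/* 512 KiB chunks (1024 sectors); bio of 512 sectors starting 768 in. */
	printf("%llu\n", (unsigned long long)
	       lowest_chunk_sector(10 * 1024 + 768, 512, 1024));	/* 11264 */
	return 0;
}

In the second hunk above, the chosen sector then seeds 's', the starting bit that find_next_bit_wrap() uses when walking ctx.sectors_to_do.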
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 75c92e282fa2..19a4a085f73a 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -1035,7 +1035,6 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
struct device *dev = &mdp->pdev->dev;
struct device_node *node, *parent;
- const struct mtk_mdp_driver_data *data = mdp->mdp_data;
parent = dev->of_node->parent;
@@ -1045,7 +1044,7 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
int id, alias_id;
struct mdp_comp *comp;
- of_id = of_match_node(data->mdp_sub_comp_dt_ids, node);
+ of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 238521622b75..253e77189b69 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -378,8 +378,8 @@ static int mxc_isi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mxc_isi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
- SET_RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
+ RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -528,7 +528,7 @@ static struct platform_driver mxc_isi_driver = {
.driver = {
.of_match_table = mxc_isi_of_match,
.name = MXC_ISI_DRIVER_NAME,
- .pm = &mxc_isi_pm_ops,
+ .pm = pm_ptr(&mxc_isi_pm_ops),
}
};
module_platform_driver(mxc_isi_driver);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
index db538f3d88ec..19e80b95ffea 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
@@ -29,11 +29,10 @@ static inline void mxc_isi_write(struct mxc_isi_pipe *pipe, u32 reg, u32 val)
void mxc_isi_channel_set_inbuf(struct mxc_isi_pipe *pipe, dma_addr_t dma_addr)
{
- mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, dma_addr);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, lower_32_bits(dma_addr));
if (pipe->isi->pdata->has_36bit_dma)
- mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR, dma_addr >> 32);
-#endif
+ mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR,
+ upper_32_bits(dma_addr));
}
void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
@@ -45,34 +44,36 @@ void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
val = mxc_isi_read(pipe, CHNL_OUT_BUF_CTRL);
if (buf_id == MXC_ISI_BUF1) {
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y, dma_addrs[0]);
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U, dma_addrs[1]);
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V, dma_addrs[2]);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y,
+ lower_32_bits(dma_addrs[0]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U,
+ lower_32_bits(dma_addrs[1]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V,
+ lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF1_XTND_ADDR,
- dma_addrs[0] >> 32);
+ upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF1_XTND_ADDR,
- dma_addrs[1] >> 32);
+ upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF1_XTND_ADDR,
- dma_addrs[2] >> 32);
+ upper_32_bits(dma_addrs[2]));
}
-#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR;
} else {
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y, dma_addrs[0]);
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U, dma_addrs[1]);
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V, dma_addrs[2]);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y,
+ lower_32_bits(dma_addrs[0]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U,
+ lower_32_bits(dma_addrs[1]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V,
+ lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF2_XTND_ADDR,
- dma_addrs[0] >> 32);
+ upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF2_XTND_ADDR,
- dma_addrs[1] >> 32);
+ upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF2_XTND_ADDR,
- dma_addrs[2] >> 32);
+ upper_32_bits(dma_addrs[2]));
}
-#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR;
}
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 98bfd445a649..2a77353f10b5 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -728,11 +728,9 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_SEQ_TB:
case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
- vnmc = VNMC_IM_ODD_EVEN;
- progressive = true;
- break;
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
+ progressive = true;
break;
default:
vnmc = VNMC_IM_ODD;
@@ -1312,12 +1310,23 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
}
if (rvin_scaler_needed(vin)) {
+ /* Gen3 can't scale NV12 */
+ if (vin->info->model == RCAR_GEN3 &&
+ vin->format.pixelformat == V4L2_PIX_FMT_NV12)
+ return -EPIPE;
+
if (!vin->scaler)
return -EPIPE;
} else {
- if (fmt.format.width != vin->format.width ||
- fmt.format.height != vin->format.height)
- return -EPIPE;
+ if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) {
+ if (ALIGN(fmt.format.width, 32) != vin->format.width ||
+ ALIGN(fmt.format.height, 32) != vin->format.height)
+ return -EPIPE;
+ } else {
+ if (fmt.format.width != vin->format.width ||
+ fmt.format.height != vin->format.height)
+ return -EPIPE;
+ }
}
if (fmt.format.code != vin->mbus_code)
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index c2d080fc4fc4..27cbe148f0db 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -84,6 +84,11 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* Limit the max delay range to 300s */
+static struct netlink_range_validation delay_range = {
+ .max = 300000,
+};
+
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
@@ -114,7 +119,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
- [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 0498fc6731f8..f3f27f0bd2a6 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -169,6 +169,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ NULL, -1, 0}
};
+static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 300000, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
+};
+
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
@@ -488,7 +494,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
- .values = bond_intmax_tbl,
+ .values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
}
};
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 57ce74315eba..caa00c72aeeb 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -294,19 +294,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- /* Clear PCI MSI-X Pending Bit Array (PBA)
- *
- * This bit is set if an interrupt event occurs while the vector is
- * masked. If this bit is set and we reenable the interrupt, it will
- * fire again. Since we're just about to poll the queue state, we don't
- * need it to fire again.
- *
- * Under high softirq load, it's possible that the interrupt condition
- * is triggered twice before we got the chance to process it.
- */
- gve_write_irq_doorbell_dqo(priv, block,
- GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
-
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 4c205afbd230..985cff910f30 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -654,7 +654,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
BIT(hw->index), BIT(hw->index));
}
- if (!hw_list[!hw->index]->wed_dev &&
+ if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
hw->eth->dma_dev != hw->eth->dev)
mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index ef6fd3f6be30..5595bfe84bbb 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -307,15 +307,15 @@ static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
- REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000214),
- REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000218),
- REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00021c),
- REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000220),
- REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000224),
- REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000228),
- REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00022c),
- REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000230),
- REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000234),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 4538f334df57..d3c5306f1c41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -181,6 +181,7 @@ enum power_event {
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
+#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
/* LPI control and status defines */
#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index afaec3fb9ab6..03b1c5a97826 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -25,6 +25,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
+ u32 clk_rate;
value |= GMAC_CORE_INIT;
@@ -47,6 +48,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_CONFIG);
+ /* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+ writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
+
/* Enable GMAC interrupts */
value = GMAC_INT_DEFAULT_ENABLE;
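
As a worked example of the 1 us counter setup above: assuming, say, a 250 MHz CSR clock, clk_rate / 1000000 - 1 = 250 - 1 = 249 is written to GMAC4_MAC_ONEUS_TIC_COUNTER, i.e. the number of CSR clock cycles in one microsecond minus one.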
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 460b3d4f2245..ab5133eb1d51 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -436,6 +436,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, &rt->dst);
+
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
@@ -474,6 +477,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, dst);
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
diff --git a/drivers/net/mdio/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index 68fc55906e78..554837c21e73 100644
--- a/drivers/net/mdio/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
@@ -67,6 +67,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
struct device *dev = &interface->dev;
struct mvusb_mdio *mvusb;
struct mii_bus *mdio;
+ int ret;
mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
if (!mdio)
@@ -87,7 +88,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
mdio->write = mvusb_mdio_write;
usb_set_intfdata(interface, mvusb);
- return of_mdiobus_register(mdio, dev->of_node);
+ ret = of_mdiobus_register(mdio, dev->of_node);
+ if (ret)
+ goto put_dev;
+
+ return 0;
+
+put_dev:
+ usb_put_dev(mvusb->udev);
+ return ret;
}
static void mvusb_mdio_disconnect(struct usb_interface *interface)
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 539cd43eae8d..f19d48c94fe0 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -1203,7 +1203,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_2500BASEX] = {
.supported = xpcs_2500basex_features,
.interface = xpcs_2500basex_interfaces,
- .num_interfaces = ARRAY_SIZE(xpcs_2500basex_features),
+ .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
.an_mode = DW_2500BASEX,
},
};
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 9902fb182099..729db441797a 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -40,6 +40,11 @@ static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
}
+static inline int bcm_phy_read_exp_sel(struct phy_device *phydev, u16 reg)
+{
+ return bcm_phy_read_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER);
+}
+
int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 06be71ecd2f8..f8c17a253f8b 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -486,7 +486,7 @@ static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
/* Read CORE_EXPA9 */
- tmp = bcm_phy_read_exp(phydev, 0x00a9);
+ tmp = bcm_phy_read_exp_sel(phydev, 0x00a9);
/* CORE_EXPA9[6:1] is rcalcode[5:0] */
rcalcode = (tmp & 0x7e) / 2;
/* Correct RCAL code + 1 is -1% rprogr, LP: +16 */
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index ce993cc75bf3..d30d730ed5a7 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -742,7 +742,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
/* copy skb_ubuf_info for callback when skb has no error */
@@ -1197,7 +1197,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
rcu_read_lock();
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d24ea2e05156..81c5c9e38477 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -552,7 +552,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- const struct nvme_uring_cmd *cmd = ioucmd->cmd;
+ const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
struct nvme_uring_data d;
struct nvme_command c;
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 91a077c35b8b..a79318e90a13 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -784,7 +784,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
fifo = vring->fifo;
/* Return if vdev is not ready. */
- if (!fifo->vdev[devid])
+ if (!fifo || !fifo->vdev[devid])
return;
/* Return if another vring is running. */
@@ -980,9 +980,13 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
vq->num_max = vring->num;
+ vq->priv = vring;
+
+ /* Make vq update visible before using it. */
+ virtio_mb(false);
+
vqs[i] = vq;
vring->vq = vq;
- vq->priv = vring;
}
return 0;
@@ -1302,6 +1306,9 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+ /* Make all updates visible before setting the 'is_ready' flag. */
+ virtio_mb(false);
+
fifo->is_ready = true;
return 0;
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 873f59c3e280..6364ae262705 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -211,6 +211,7 @@ struct bios_rfkill2_state {
static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x270, { KEY_MICMUTE } },
{ KE_KEY, 0x20e6, { KEY_PROG1 } },
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 1a300e14f350..064f186ae81b 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
int min_max)
{
unsigned int input;
+ int ret;
if (kstrtouint(buf, 10, &input))
return -EINVAL;
mutex_lock(&uncore_lock);
- uncore_write(data, input, min_max);
+ ret = uncore_write(data, input, min_max);
mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
return count;
}
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
index 80abc708e4f2..d904fad499aa 100644
--- a/drivers/platform/x86/intel_scu_pcidrv.c
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -34,6 +34,7 @@ static int intel_scu_pci_probe(struct pci_dev *pdev,
static const struct pci_device_id pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x082a) },
{ PCI_VDEVICE(INTEL, 0x08ea) },
{ PCI_VDEVICE(INTEL, 0x0a94) },
{ PCI_VDEVICE(INTEL, 0x11a0) },
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 6fe82f805ea8..b3808ad77278 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10318,6 +10318,7 @@ static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
static int dytc_capabilities;
static bool dytc_mmc_get_available;
+static int profile_force;
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
@@ -10580,6 +10581,21 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (err)
return err;
+ /* Check if user wants to override the profile selection */
+ if (profile_force) {
+ switch (profile_force) {
+ case -1:
+ dytc_capabilities = 0;
+ break;
+ case 1:
+ dytc_capabilities = BIT(DYTC_FC_MMC);
+ break;
+ case 2:
+ dytc_capabilities = BIT(DYTC_FC_PSC);
+ break;
+ }
+ pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
+ }
if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
pr_debug("MMC is supported\n");
/*
@@ -10593,11 +10609,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
dytc_mmc_get_available = true;
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
- /* Support for this only works on AMD platforms */
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
- dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
- return -ENODEV;
- }
pr_debug("PSC is supported\n");
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
@@ -11646,6 +11657,9 @@ MODULE_PARM_DESC(uwb_state,
"Initial state of the emulated UWB switch");
#endif
+module_param(profile_force, int, 0444);
+MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
+
static void thinkpad_acpi_module_exit(void)
{
struct ibm_struct *ibm, *itmp;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 13802a3c3591..68e66b60445c 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -336,6 +336,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
.properties = dexp_ursus_7w_props,
};
+static const struct property_entry dexp_ursus_kx210i_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data dexp_ursus_kx210i_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = dexp_ursus_kx210i_props,
+};
+
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -378,6 +394,11 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
.properties = gdix1001_upside_down_props,
};
+static const struct ts_dmi_data gdix1002_00_upside_down_data = {
+ .acpi_name = "GDIX1002:00",
+ .properties = gdix1001_upside_down_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -1186,6 +1207,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* DEXP Ursus KX210i */
+ .driver_data = (void *)&dexp_ursus_kx210i_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
+ },
+ },
+ {
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
.matches = {
@@ -1296,6 +1325,18 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Juno Tablet */
+ .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ /* Both product- and board-name being "Default string" is somewhat rare */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ /* Above matches are too generic, add partial bios-version match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
+ },
+ },
+ {
/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
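
For illustration only: each touchscreen_dmi entry pairs a DMI string match with a set of device properties; when the running system's DMI data matches a row, that row's driver_data supplies the properties for the touchscreen. A hypothetical sketch of the lookup side (demo_* names and the match strings are invented):

#include <linux/dmi.h>
#include <linux/property.h>

static const struct property_entry demo_ts_props[] = {
	PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
	PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
	{ }
};

static const struct dmi_system_id demo_dmi_table[] = {
	{
		.driver_data = (void *)demo_ts_props,
		.matches = {
			/* every DMI_MATCH() in a row must match for the row to apply */
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
		},
	},
	{ }	/* terminator */
};

static const struct property_entry *demo_lookup_ts_props(void)
{
	const struct dmi_system_id *id = dmi_first_match(demo_dmi_table);

	return id ? id->driver_data : NULL;
}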
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 03e71e3d5e5b..0704809d9d99 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -971,8 +971,7 @@ config SCSI_SYM53C8XX_MMIO
config SCSI_IPR
tristate "IBM Power Linux RAID adapter support"
- depends on PCI && SCSI && ATA
- select SATA_HOST
+ depends on PCI && SCSI
select FW_LOADER
select IRQ_POLL
select SGL_ALLOC
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 4d3c280a7360..4e13797b2a4a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -58,7 +58,6 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
@@ -595,10 +594,6 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
- if (ipr_cmd->ioa_cfg->sis64)
- trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
- else
- trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
@@ -636,7 +631,6 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
- struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
int hrrq_id;
@@ -651,18 +645,15 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
if (ipr_cmd->ioa_cfg->sis64) {
ioarcb->u.sis64_addr_data.data_ioadl_addr =
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
- ioasa64->u.gata.status = 0;
} else {
ioarcb->write_ioadl_addr =
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- ioasa->u.gata.status = 0;
}
ioasa->hdr.ioasc = 0;
ioasa->hdr.residual_data_len = 0;
ipr_cmd->scsi_cmd = NULL;
- ipr_cmd->qc = NULL;
ipr_cmd->sense_buffer[0] = 0;
ipr_cmd->dma_use_sg = 0;
}
@@ -806,48 +797,6 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
return 0;
}
-/**
- * __ipr_sata_eh_done - done function for aborted SATA commands
- * @ipr_cmd: ipr command struct
- *
- * This function is invoked for ops generated to SATA
- * devices which are being aborted.
- *
- * Return value:
- * none
- **/
-static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
-{
- struct ata_queued_cmd *qc = ipr_cmd->qc;
- struct ipr_sata_port *sata_port = qc->ap->private_data;
-
- qc->err_mask |= AC_ERR_OTHER;
- sata_port->ioasa.status |= ATA_BUSY;
- ata_qc_complete(qc);
- if (ipr_cmd->eh_comp)
- complete(ipr_cmd->eh_comp);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
-}
-
-/**
- * ipr_sata_eh_done - done function for aborted SATA commands
- * @ipr_cmd: ipr command struct
- *
- * This function is invoked for ops generated to SATA
- * devices which are being aborted.
- *
- * Return value:
- * none
- **/
-static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
-{
- struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
- unsigned long hrrq_flags;
-
- spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
- __ipr_sata_eh_done(ipr_cmd);
- spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
-}
/**
* __ipr_scsi_eh_done - mid-layer done function for aborted ops
@@ -920,8 +869,6 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
if (ipr_cmd->scsi_cmd)
ipr_cmd->done = __ipr_scsi_eh_done;
- else if (ipr_cmd->qc)
- ipr_cmd->done = __ipr_sata_eh_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
IPR_IOASC_IOA_WAS_RESET);
@@ -1143,31 +1090,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
}
/**
- * ipr_update_ata_class - Update the ata class in the resource entry
- * @res: resource entry struct
- * @proto: cfgte device bus protocol value
- *
- * Return value:
- * none
- **/
-static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
-{
- switch (proto) {
- case IPR_PROTO_SATA:
- case IPR_PROTO_SAS_STP:
- res->ata_class = ATA_DEV_ATA;
- break;
- case IPR_PROTO_SATA_ATAPI:
- case IPR_PROTO_SAS_STP_ATAPI:
- res->ata_class = ATA_DEV_ATAPI;
- break;
- default:
- res->ata_class = ATA_DEV_UNKNOWN;
- break;
- }
-}
-
-/**
* ipr_init_res_entry - Initialize a resource entry struct.
* @res: resource entry struct
* @cfgtew: config table entry wrapper struct
@@ -1179,7 +1101,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
struct ipr_config_table_entry_wrapper *cfgtew)
{
int found = 0;
- unsigned int proto;
struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
struct ipr_resource_entry *gscsi_res = NULL;
@@ -1190,10 +1111,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->resetting_device = 0;
res->reset_occurred = 0;
res->sdev = NULL;
- res->sata_port = NULL;
if (ioa_cfg->sis64) {
- proto = cfgtew->u.cfgte64->proto;
res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
res->qmodel = IPR_QUEUEING_MODEL64(res);
@@ -1239,7 +1158,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
set_bit(res->target, ioa_cfg->target_ids);
}
} else {
- proto = cfgtew->u.cfgte->proto;
res->qmodel = IPR_QUEUEING_MODEL(res);
res->flags = cfgtew->u.cfgte->flags;
if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1252,8 +1170,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->lun = cfgtew->u.cfgte->res_addr.lun;
res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
}
-
- ipr_update_ata_class(res, proto);
}
/**
@@ -1339,7 +1255,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
struct ipr_config_table_entry_wrapper *cfgtew)
{
char buffer[IPR_MAX_RES_PATH_LENGTH];
- unsigned int proto;
int new_path = 0;
if (res->ioa_cfg->sis64) {
@@ -1351,7 +1266,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
sizeof(struct ipr_std_inq_data));
res->qmodel = IPR_QUEUEING_MODEL64(res);
- proto = cfgtew->u.cfgte64->proto;
res->res_handle = cfgtew->u.cfgte64->res_handle;
res->dev_id = cfgtew->u.cfgte64->dev_id;
@@ -1380,11 +1294,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
sizeof(struct ipr_std_inq_data));
res->qmodel = IPR_QUEUEING_MODEL(res);
- proto = cfgtew->u.cfgte->proto;
res->res_handle = cfgtew->u.cfgte->res_handle;
}
-
- ipr_update_ata_class(res, proto);
}
/**
@@ -4496,17 +4407,6 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
**/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
- struct ipr_resource_entry *res;
- unsigned long lock_flags = 0;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- res = (struct ipr_resource_entry *)sdev->hostdata;
-
- if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
- qdepth = IPR_MAX_CMD_PER_ATA_LUN;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
scsi_change_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
@@ -4799,68 +4699,13 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
return NULL;
}
-static struct ata_port_info sata_port_info;
-
-/**
- * ipr_target_alloc - Prepare for commands to a SCSI target
- * @starget: scsi target struct
- *
- * If the device is a SATA device, this function allocates an
- * ATA port with libata, else it does nothing.
- *
- * Return value:
- * 0 on success / non-0 on failure
- **/
-static int ipr_target_alloc(struct scsi_target *starget)
-{
- struct Scsi_Host *shost = dev_to_shost(&starget->dev);
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
- struct ipr_sata_port *sata_port;
- struct ata_port *ap;
- struct ipr_resource_entry *res;
- unsigned long lock_flags;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- res = ipr_find_starget(starget);
- starget->hostdata = NULL;
-
- if (res && ipr_is_gata(res)) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
- if (!sata_port)
- return -ENOMEM;
-
- ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
- if (ap) {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- sata_port->ioa_cfg = ioa_cfg;
- sata_port->ap = ap;
- sata_port->res = res;
-
- res->sata_port = sata_port;
- ap->private_data = sata_port;
- starget->hostdata = sata_port;
- } else {
- kfree(sata_port);
- return -ENOMEM;
- }
- }
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- return 0;
-}
-
/**
* ipr_target_destroy - Destroy a SCSI target
* @starget: scsi target struct
*
- * If the device was a SATA device, this function frees the libata
- * ATA port, else it does nothing.
- *
**/
static void ipr_target_destroy(struct scsi_target *starget)
{
- struct ipr_sata_port *sata_port = starget->hostdata;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
@@ -4874,12 +4719,6 @@ static void ipr_target_destroy(struct scsi_target *starget)
clear_bit(starget->id, ioa_cfg->target_ids);
}
}
-
- if (sata_port) {
- starget->hostdata = NULL;
- ata_sas_port_destroy(sata_port->ap);
- kfree(sata_port);
- }
}
/**
@@ -4922,11 +4761,8 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *) sdev->hostdata;
if (res) {
- if (res->sata_port)
- res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
sdev->hostdata = NULL;
res->sdev = NULL;
- res->sata_port = NULL;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
@@ -4944,7 +4780,6 @@ static int ipr_slave_configure(struct scsi_device *sdev)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
- struct ata_port *ap = NULL;
unsigned long lock_flags = 0;
char buffer[IPR_MAX_RES_PATH_LENGTH];
@@ -4964,15 +4799,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
IPR_VSET_RW_TIMEOUT);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
- if (ipr_is_gata(res) && res->sata_port)
- ap = res->sata_port->ap;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ap) {
- scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
- ata_sas_slave_configure(sdev, ap);
- }
-
if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
ipr_format_res_path(ioa_cfg,
@@ -4984,37 +4812,6 @@ static int ipr_slave_configure(struct scsi_device *sdev)
}
/**
- * ipr_ata_slave_alloc - Prepare for commands to a SATA device
- * @sdev: scsi device struct
- *
- * This function initializes an ATA port so that future commands
- * sent through queuecommand will work.
- *
- * Return value:
- * 0 on success
- **/
-static int ipr_ata_slave_alloc(struct scsi_device *sdev)
-{
- struct ipr_sata_port *sata_port = NULL;
- int rc = -ENXIO;
-
- ENTER;
- if (sdev->sdev_target)
- sata_port = sdev->sdev_target->hostdata;
- if (sata_port) {
- rc = ata_sas_port_init(sata_port->ap);
- if (rc == 0)
- rc = ata_sas_sync_probe(sata_port->ap);
- }
-
- if (rc)
- ipr_slave_destroy(sdev);
-
- LEAVE;
- return rc;
-}
-
-/**
* ipr_slave_alloc - Prepare for commands to a device.
* @sdev: scsi device struct
*
@@ -5047,8 +4844,10 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
res->needs_sync_complete = 1;
rc = 0;
if (ipr_is_gata(res)) {
+ sdev_printk(KERN_ERR, sdev, "SATA devices are no longer "
+ "supported by this driver. Skipping device.\n");
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return ipr_ata_slave_alloc(sdev);
+ return -ENXIO;
}
}
@@ -5092,23 +4891,6 @@ static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_match_res - Match function for specified resource entry
- * @ipr_cmd: ipr command struct
- * @resource: resource entry to match
- *
- * Returns:
- * 1 if command matches sdev / 0 if command does not match sdev
- **/
-static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
-{
- struct ipr_resource_entry *res = resource;
-
- if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
- return 1;
- return 0;
-}
-
-/**
* ipr_wait_for_ops - Wait for matching commands to complete
* @ioa_cfg: ioa config struct
* @device: device to match (sdev)
@@ -5220,8 +5002,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
* This function issues a device reset to the affected device.
* If the device is a SCSI device, a LUN reset will be sent
* to the device first. If that does not work, a target reset
- * will be sent. If the device is a SATA device, a PHY reset will
- * be sent.
+ * will be sent.
*
* Return value:
* 0 on success / non-zero on failure
@@ -5232,7 +5013,6 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
struct ipr_cmd_pkt *cmd_pkt;
- struct ipr_ioarcb_ata_regs *regs;
u32 ioasc;
ENTER;
@@ -5240,87 +5020,22 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ioarcb = &ipr_cmd->ioarcb;
cmd_pkt = &ioarcb->cmd_pkt;
- if (ipr_cmd->ioa_cfg->sis64) {
- regs = &ipr_cmd->i.ata_ioadl.regs;
+ if (ipr_cmd->ioa_cfg->sis64)
ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
- } else
- regs = &ioarcb->u.add_data.u.regs;
ioarcb->res_handle = res->res_handle;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
- if (ipr_is_gata(res)) {
- cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
- ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
- regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
- }
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
- if (ipr_cmd->ioa_cfg->sis64)
- memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
- sizeof(struct ipr_ioasa_gata));
- else
- memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
- sizeof(struct ipr_ioasa_gata));
- }
LEAVE;
return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
- * ipr_sata_reset - Reset the SATA port
- * @link: SATA link to reset
- * @classes: class of the attached device
- * @deadline: unused
- *
- * This function issues a SATA phy reset to the affected ATA link.
- *
- * Return value:
- * 0 on success / non-zero on failure
- **/
-static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
- unsigned long deadline)
-{
- struct ipr_sata_port *sata_port = link->ap->private_data;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_resource_entry *res;
- unsigned long lock_flags = 0;
- int rc = -ENXIO, ret;
-
- ENTER;
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while (ioa_cfg->in_reset_reload) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- }
-
- res = sata_port->res;
- if (res) {
- rc = ipr_device_reset(ioa_cfg, res);
- *classes = res->ata_class;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
- if (ret != SUCCESS) {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- }
- } else
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- LEAVE;
- return rc;
-}
-
-/**
* __ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct
*
@@ -5333,12 +5048,9 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
**/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
- struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
- struct ata_port *ap;
- int rc = 0, i;
- struct ipr_hrr_queue *hrrq;
+ int rc = 0;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -5354,36 +5066,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
- for_each_hrrq(hrrq, ioa_cfg) {
- spin_lock(&hrrq->_lock);
- for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
- ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
-
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (!ipr_cmd->qc)
- continue;
- if (ipr_cmnd_is_free(ipr_cmd))
- continue;
-
- ipr_cmd->done = ipr_sata_eh_done;
- if (!(ipr_cmd->qc->flags & ATA_QCFLAG_EH)) {
- ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
- ipr_cmd->qc->flags |= ATA_QCFLAG_EH;
- }
- }
- }
- spin_unlock(&hrrq->_lock);
- }
res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
- if (ipr_is_gata(res) && res->sata_port) {
- ap = res->sata_port->ap;
- spin_unlock_irq(scsi_cmd->device->host->host_lock);
- ata_std_error_handler(ap);
- spin_lock_irq(scsi_cmd->device->host->host_lock);
- } else
- rc = ipr_device_reset(ioa_cfg, res);
+ rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
res->reset_occurred = 1;
@@ -5407,12 +5093,8 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
- if (rc == SUCCESS) {
- if (ipr_is_gata(res) && res->sata_port)
- rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
- else
- rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
- }
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
return rc;
}
@@ -6564,7 +6246,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
- unsigned long hrrq_flags, lock_flags;
+ unsigned long hrrq_flags;
int rc;
struct ipr_hrr_queue *hrrq;
int hrrq_id;
@@ -6574,13 +6256,6 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata;
- if (ipr_is_gata(res) && res->sata_port) {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return rc;
- }
-
hrrq_id = ipr_get_hrrq_index(ioa_cfg);
hrrq = &ioa_cfg->hrrq[hrrq_id];
@@ -6691,30 +6366,6 @@ err_nodev:
}
/**
- * ipr_ioctl - IOCTL handler
- * @sdev: scsi device struct
- * @cmd: IOCTL cmd
- * @arg: IOCTL arg
- *
- * Return value:
- * 0 on success / other on failure
- **/
-static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg)
-{
- struct ipr_resource_entry *res;
-
- res = (struct ipr_resource_entry *)sdev->hostdata;
- if (res && ipr_is_gata(res)) {
- if (cmd == HDIO_GET_IDENTITY)
- return -ENOTTY;
- return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
- }
-
- return -EINVAL;
-}
-
-/**
* ipr_ioa_info - Get information about the card/driver
* @host: scsi host struct
*
@@ -6740,12 +6391,7 @@ static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IPR",
.info = ipr_ioa_info,
- .ioctl = ipr_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ipr_ioctl,
-#endif
.queuecommand = ipr_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
@@ -6753,7 +6399,6 @@ static const struct scsi_host_template driver_template = {
.slave_configure = ipr_slave_configure,
.slave_destroy = ipr_slave_destroy,
.scan_finished = ipr_scan_finished,
- .target_alloc = ipr_target_alloc,
.target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth,
.bios_param = ipr_biosparam,
@@ -6767,418 +6412,6 @@ static const struct scsi_host_template driver_template = {
.proc_name = IPR_NAME,
};
-/**
- * ipr_ata_phy_reset - libata phy_reset handler
- * @ap: ata port to reset
- *
- **/
-static void ipr_ata_phy_reset(struct ata_port *ap)
-{
- unsigned long flags;
- struct ipr_sata_port *sata_port = ap->private_data;
- struct ipr_resource_entry *res = sata_port->res;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- int rc;
-
- ENTER;
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while (ioa_cfg->in_reset_reload) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- }
-
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
- goto out_unlock;
-
- rc = ipr_device_reset(ioa_cfg, res);
-
- if (rc) {
- ap->link.device[0].class = ATA_DEV_NONE;
- goto out_unlock;
- }
-
- ap->link.device[0].class = res->ata_class;
- if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
- ap->link.device[0].class = ATA_DEV_NONE;
-
-out_unlock:
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- LEAVE;
-}
-
-/**
- * ipr_ata_post_internal - Cleanup after an internal command
- * @qc: ATA queued command
- *
- * Return value:
- * none
- **/
-static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
-{
- struct ipr_sata_port *sata_port = qc->ap->private_data;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_cmnd *ipr_cmd;
- struct ipr_hrr_queue *hrrq;
- unsigned long flags;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while (ioa_cfg->in_reset_reload) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- }
-
- for_each_hrrq(hrrq, ioa_cfg) {
- spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (ipr_cmd->qc == qc) {
- ipr_device_reset(ioa_cfg, sata_port->res);
- break;
- }
- }
- spin_unlock(&hrrq->_lock);
- }
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
-}
-
-/**
- * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
- * @regs: destination
- * @tf: source ATA taskfile
- *
- * Return value:
- * none
- **/
-static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
- struct ata_taskfile *tf)
-{
- regs->feature = tf->feature;
- regs->nsect = tf->nsect;
- regs->lbal = tf->lbal;
- regs->lbam = tf->lbam;
- regs->lbah = tf->lbah;
- regs->device = tf->device;
- regs->command = tf->command;
- regs->hob_feature = tf->hob_feature;
- regs->hob_nsect = tf->hob_nsect;
- regs->hob_lbal = tf->hob_lbal;
- regs->hob_lbam = tf->hob_lbam;
- regs->hob_lbah = tf->hob_lbah;
- regs->ctl = tf->ctl;
-}
-
-/**
- * ipr_sata_done - done function for SATA commands
- * @ipr_cmd: ipr command struct
- *
- * This function is invoked by the interrupt handler for
- * ops generated by the SCSI mid-layer to SATA devices
- *
- * Return value:
- * none
- **/
-static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
-{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- struct ata_queued_cmd *qc = ipr_cmd->qc;
- struct ipr_sata_port *sata_port = qc->ap->private_data;
- struct ipr_resource_entry *res = sata_port->res;
- u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
- spin_lock(&ipr_cmd->hrrq->_lock);
- if (ipr_cmd->ioa_cfg->sis64)
- memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
- sizeof(struct ipr_ioasa_gata));
- else
- memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
- sizeof(struct ipr_ioasa_gata));
- ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
-
- if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
- scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
-
- if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
- qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
- else
- qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- spin_unlock(&ipr_cmd->hrrq->_lock);
- ata_qc_complete(qc);
-}
-
-/**
- * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
- * @ipr_cmd: ipr command struct
- * @qc: ATA queued command
- *
- **/
-static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
- struct ata_queued_cmd *qc)
-{
- u32 ioadl_flags = 0;
- struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
- struct ipr_ioadl64_desc *last_ioadl64 = NULL;
- int len = qc->nbytes;
- struct scatterlist *sg;
- unsigned int si;
- dma_addr_t dma_addr = ipr_cmd->dma_addr;
-
- if (len == 0)
- return;
-
- if (qc->dma_dir == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- } else if (qc->dma_dir == DMA_FROM_DEVICE)
- ioadl_flags = IPR_IOADL_FLAGS_READ;
-
- ioarcb->data_transfer_length = cpu_to_be32(len);
- ioarcb->ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
- ioarcb->u.sis64_addr_data.data_ioadl_addr =
- cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
-
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- ioadl64->flags = cpu_to_be32(ioadl_flags);
- ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
- ioadl64->address = cpu_to_be64(sg_dma_address(sg));
-
- last_ioadl64 = ioadl64;
- ioadl64++;
- }
-
- if (likely(last_ioadl64))
- last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-}
-
-/**
- * ipr_build_ata_ioadl - Build an ATA scatter/gather list
- * @ipr_cmd: ipr command struct
- * @qc: ATA queued command
- *
- **/
-static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
- struct ata_queued_cmd *qc)
-{
- u32 ioadl_flags = 0;
- struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
- struct ipr_ioadl_desc *last_ioadl = NULL;
- int len = qc->nbytes;
- struct scatterlist *sg;
- unsigned int si;
-
- if (len == 0)
- return;
-
- if (qc->dma_dir == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->data_transfer_length = cpu_to_be32(len);
- ioarcb->ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- } else if (qc->dma_dir == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(len);
- ioarcb->read_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- }
-
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
- ioadl->address = cpu_to_be32(sg_dma_address(sg));
-
- last_ioadl = ioadl;
- ioadl++;
- }
-
- if (likely(last_ioadl))
- last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-}
-
-/**
- * ipr_qc_defer - Get a free ipr_cmd
- * @qc: queued command
- *
- * Return value:
- * 0 if success
- **/
-static int ipr_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ipr_sata_port *sata_port = ap->private_data;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_cmnd *ipr_cmd;
- struct ipr_hrr_queue *hrrq;
- int hrrq_id;
-
- hrrq_id = ipr_get_hrrq_index(ioa_cfg);
- hrrq = &ioa_cfg->hrrq[hrrq_id];
-
- qc->lldd_task = NULL;
- spin_lock(&hrrq->_lock);
- if (unlikely(hrrq->ioa_is_dead)) {
- spin_unlock(&hrrq->_lock);
- return 0;
- }
-
- if (unlikely(!hrrq->allow_cmds)) {
- spin_unlock(&hrrq->_lock);
- return ATA_DEFER_LINK;
- }
-
- ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
- if (ipr_cmd == NULL) {
- spin_unlock(&hrrq->_lock);
- return ATA_DEFER_LINK;
- }
-
- qc->lldd_task = ipr_cmd;
- spin_unlock(&hrrq->_lock);
- return 0;
-}
-
-/**
- * ipr_qc_issue - Issue a SATA qc to a device
- * @qc: queued command
- *
- * Return value:
- * 0 if success
- **/
-static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ipr_sata_port *sata_port = ap->private_data;
- struct ipr_resource_entry *res = sata_port->res;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_cmnd *ipr_cmd;
- struct ipr_ioarcb *ioarcb;
- struct ipr_ioarcb_ata_regs *regs;
-
- if (qc->lldd_task == NULL)
- ipr_qc_defer(qc);
-
- ipr_cmd = qc->lldd_task;
- if (ipr_cmd == NULL)
- return AC_ERR_SYSTEM;
-
- qc->lldd_task = NULL;
- spin_lock(&ipr_cmd->hrrq->_lock);
- if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
- ipr_cmd->hrrq->ioa_is_dead)) {
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- spin_unlock(&ipr_cmd->hrrq->_lock);
- return AC_ERR_SYSTEM;
- }
-
- ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
- ioarcb = &ipr_cmd->ioarcb;
-
- if (ioa_cfg->sis64) {
- regs = &ipr_cmd->i.ata_ioadl.regs;
- ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
- } else
- regs = &ioarcb->u.add_data.u.regs;
-
- memset(regs, 0, sizeof(*regs));
- ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
-
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
- ipr_cmd->qc = qc;
- ipr_cmd->done = ipr_sata_done;
- ipr_cmd->ioarcb.res_handle = res->res_handle;
- ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
- ipr_cmd->dma_use_sg = qc->n_elem;
-
- if (ioa_cfg->sis64)
- ipr_build_ata_ioadl64(ipr_cmd, qc);
- else
- ipr_build_ata_ioadl(ipr_cmd, qc);
-
- regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
- ipr_copy_sata_tf(regs, &qc->tf);
- memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
- ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
-
- switch (qc->tf.protocol) {
- case ATA_PROT_NODATA:
- case ATA_PROT_PIO:
- break;
-
- case ATA_PROT_DMA:
- regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
- break;
-
- case ATAPI_PROT_PIO:
- case ATAPI_PROT_NODATA:
- regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
- break;
-
- case ATAPI_PROT_DMA:
- regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
- regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
- break;
-
- default:
- WARN_ON(1);
- spin_unlock(&ipr_cmd->hrrq->_lock);
- return AC_ERR_INVALID;
- }
-
- ipr_send_command(ipr_cmd);
- spin_unlock(&ipr_cmd->hrrq->_lock);
-
- return 0;
-}
-
-/**
- * ipr_qc_fill_rtf - Read result TF
- * @qc: ATA queued command
- **/
-static void ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
-{
- struct ipr_sata_port *sata_port = qc->ap->private_data;
- struct ipr_ioasa_gata *g = &sata_port->ioasa;
- struct ata_taskfile *tf = &qc->result_tf;
-
- tf->feature = g->error;
- tf->nsect = g->nsect;
- tf->lbal = g->lbal;
- tf->lbam = g->lbam;
- tf->lbah = g->lbah;
- tf->device = g->device;
- tf->command = g->status;
- tf->hob_nsect = g->hob_nsect;
- tf->hob_lbal = g->hob_lbal;
- tf->hob_lbam = g->hob_lbam;
- tf->hob_lbah = g->hob_lbah;
-}
-
-static struct ata_port_operations ipr_sata_ops = {
- .phy_reset = ipr_ata_phy_reset,
- .hardreset = ipr_sata_reset,
- .post_internal_cmd = ipr_ata_post_internal,
- .qc_prep = ata_noop_qc_prep,
- .qc_defer = ipr_qc_defer,
- .qc_issue = ipr_qc_issue,
- .qc_fill_rtf = ipr_qc_fill_rtf,
- .port_start = ata_sas_port_start,
- .port_stop = ata_sas_port_stop
-};
-
-static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
- ATA_FLAG_SAS_HOST,
- .pio_mask = ATA_PIO4_ONLY,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ipr_sata_ops
-};
-
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
PVR_NORTHSTAR,
@@ -10181,7 +9414,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
- ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 69444d21fca1..c77d6ca1a210 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -16,7 +16,6 @@
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/completion.h>
-#include <linux/libata.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/irq_poll.h>
@@ -35,7 +34,6 @@
* This can be adjusted at runtime through sysfs device attributes.
*/
#define IPR_MAX_CMD_PER_LUN 6
-#define IPR_MAX_CMD_PER_ATA_LUN 1
/*
* IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
@@ -197,7 +195,6 @@
#define IPR_LUN_RESET 0x40
#define IPR_TARGET_RESET 0x20
#define IPR_BUS_RESET 0x10
-#define IPR_ATA_PHY_RESET 0x80
#define IPR_ID_HOST_RR_Q 0xC4
#define IPR_QUERY_IOA_CONFIG 0xC5
#define IPR_CANCEL_ALL_REQUESTS 0xCE
@@ -521,7 +518,6 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_SCSICDB 0x00
#define IPR_RQTYPE_IOACMD 0x01
#define IPR_RQTYPE_HCAM 0x02
-#define IPR_RQTYPE_ATA_PASSTHRU 0x04
#define IPR_RQTYPE_PIPE 0x05
u8 reserved2;
@@ -546,30 +542,6 @@ struct ipr_cmd_pkt {
__be16 timeout;
}__attribute__ ((packed, aligned(4)));
-struct ipr_ioarcb_ata_regs { /* 22 bytes */
- u8 flags;
-#define IPR_ATA_FLAG_PACKET_CMD 0x80
-#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
-#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
- u8 reserved[3];
-
- __be16 data;
- u8 feature;
- u8 nsect;
- u8 lbal;
- u8 lbam;
- u8 lbah;
- u8 device;
- u8 command;
- u8 reserved2[3];
- u8 hob_feature;
- u8 hob_nsect;
- u8 hob_lbal;
- u8 hob_lbam;
- u8 hob_lbah;
- u8 ctl;
-}__attribute__ ((packed, aligned(2)));
-
struct ipr_ioadl_desc {
__be32 flags_and_data_len;
#define IPR_IOADL_FLAGS_MASK 0xff000000
@@ -591,15 +563,8 @@ struct ipr_ioadl64_desc {
__be64 address;
}__attribute__((packed, aligned (16)));
-struct ipr_ata64_ioadl {
- struct ipr_ioarcb_ata_regs regs;
- u16 reserved[5];
- struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
-}__attribute__((packed, aligned (16)));
-
struct ipr_ioarcb_add_data {
union {
- struct ipr_ioarcb_ata_regs regs;
struct ipr_ioadl_desc ioadl[5];
__be32 add_cmd_parms[10];
} u;
@@ -665,21 +630,6 @@ struct ipr_ioasa_gpdd {
__be32 ioa_data[2];
}__attribute__((packed, aligned (4)));
-struct ipr_ioasa_gata {
- u8 error;
- u8 nsect; /* Interrupt reason */
- u8 lbal;
- u8 lbam;
- u8 lbah;
- u8 device;
- u8 status;
- u8 alt_status; /* ATA CTL */
- u8 hob_nsect;
- u8 hob_lbal;
- u8 hob_lbam;
- u8 hob_lbah;
-}__attribute__((packed, aligned (4)));
-
struct ipr_auto_sense {
__be16 auto_sense_len;
__be16 ioa_data_len;
@@ -713,7 +663,6 @@ struct ipr_ioasa_hdr {
__be32 ioasc_specific; /* status code specific field */
#define IPR_ADDITIONAL_STATUS_FMT 0x80000000
#define IPR_AUTOSENSE_VALID 0x40000000
-#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
#define IPR_FIELD_POINTER_MASK 0x0000ffff
@@ -727,7 +676,6 @@ struct ipr_ioasa {
struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd;
- struct ipr_ioasa_gata gata;
} u;
struct ipr_auto_sense auto_sense;
@@ -741,7 +689,6 @@ struct ipr_ioasa64 {
struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd;
- struct ipr_ioasa_gata gata;
} u;
struct ipr_auto_sense auto_sense;
@@ -1279,13 +1226,6 @@ struct ipr_bus_attributes {
u32 max_xfer_rate;
};
-struct ipr_sata_port {
- struct ipr_ioa_cfg *ioa_cfg;
- struct ata_port *ap;
- struct ipr_resource_entry *res;
- struct ipr_ioasa_gata ioasa;
-};
-
struct ipr_resource_entry {
u8 needs_sync_complete:1;
u8 in_erp:1;
@@ -1323,7 +1263,6 @@ struct ipr_resource_entry {
struct ipr_ioa_cfg *ioa_cfg;
struct scsi_device *sdev;
- struct ipr_sata_port *sata_port;
struct list_head queue;
}; /* struct ipr_resource_entry */
@@ -1582,7 +1521,6 @@ struct ipr_ioa_cfg {
struct ipr_cmnd *reset_cmd;
int (*reset) (struct ipr_cmnd *);
- struct ata_host ata_host;
char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmd"
u32 max_cmds;
@@ -1604,7 +1542,6 @@ struct ipr_cmnd {
union {
struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
- struct ipr_ata64_ioadl ata_ioadl;
} i;
union {
struct ipr_ioasa ioasa;
@@ -1612,7 +1549,6 @@ struct ipr_cmnd {
} s;
struct list_head queue;
struct scsi_cmnd *scsi_cmd;
- struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index ec1a9ab61814..73cd25f30ca5 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3362,8 +3362,9 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
- pm8001_dbg(pm8001_ha, MSG, " register device is status = %d\n",
- status);
+ pm8001_dbg(pm8001_ha, INIT,
+ "register device status %d phy_id 0x%x device_id %d\n",
+ status, pm8001_dev->attached_phy, device_id);
switch (status) {
case DEVREG_SUCCESS:
pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
@@ -4278,7 +4279,7 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
- pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
+ pm8001_dbg(pm8001_ha, INIT, "unregister device device_id %d\n",
device_id);
return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index f2ee49756df8..45d359554182 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2450,6 +2450,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_ops->ll2->stop(qedi->cdev);
}
+ cancel_delayed_work_sync(&qedi->recovery_work);
+ cancel_delayed_work_sync(&qedi->board_disable_work);
+
qedi_free_iscsi_pf_param(qedi);
rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
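
For illustration only: the qedi fix cancels the driver's delayed work before teardown continues, so a late-firing work item cannot run against state that is about to be freed. cancel_delayed_work_sync() also waits for an already-running instance to finish, which plain cancel_delayed_work() does not. A hypothetical sketch of that probe/remove pairing (demo_* names are invented):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct delayed_work recovery_work;
	/* ... other driver state the work item touches ... */
};

static void demo_recovery_fn(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev,
					    recovery_work.work);
	/* recovery would use dev's state here */
	(void)dev;
}

static struct demo_dev *demo_probe(void)
{
	struct demo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (dev)
		INIT_DELAYED_WORK(&dev->recovery_work, demo_recovery_fn);
	return dev;
}

static void demo_remove(struct demo_dev *dev)
{
	/* Wait for any queued or running instance before freeing its state. */
	cancel_delayed_work_sync(&dev->recovery_work);
	kfree(dev);
}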
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f4fa1035a191..8c58128ad32a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5291,6 +5291,26 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
return SUCCESS;
}
+static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
+{
+ struct scsi_device *sdp = data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+
+ if (scmd->device == sdp)
+ scsi_debug_abort_cmnd(scmd);
+
+ return true;
+}
+
+/* Deletes (stops) timers or work queues of all queued commands per sdev */
+static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
+{
+ struct Scsi_Host *shost = sdp->host;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_debug_stop_all_queued_iter, sdp);
+}
+
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
@@ -5300,6 +5320,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+
+ scsi_debug_stop_all_queued(sdp);
if (devip)
set_bit(SDEBUG_UA_POR, devip->uas_bm);
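
For reference, blk_mq_tagset_busy_iter() walks every request currently in flight on a tag set and passes each one to the callback along with the opaque data pointer; returning true from the callback continues the walk, returning false stops it. A hypothetical use of the same API that only counts a device's outstanding commands (demo_* names are invented):

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

struct demo_count_ctx {
	struct scsi_device *sdev;
	unsigned int count;
};

static bool demo_count_iter(struct request *rq, void *data)
{
	struct demo_count_ctx *ctx = data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);	/* SCSI command is the request pdu */

	if (scmd->device == ctx->sdev)
		ctx->count++;
	return true;	/* keep iterating */
}

static unsigned int demo_count_busy(struct scsi_device *sdev)
{
	struct demo_count_ctx ctx = { .sdev = sdev, .count = 0 };

	blk_mq_tagset_busy_iter(&sdev->host->tag_set, demo_count_iter, &ctx);
	return ctx.count;
}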
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 31df052fbc41..202ff71e1b58 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -299,11 +299,11 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
- unsigned long completed_reqs;
+ unsigned long completed_reqs, flags;
- spin_lock(&hwq->cq_lock);
+ spin_lock_irqsave(&hwq->cq_lock, flags);
completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
- spin_unlock(&hwq->cq_lock);
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
return completed_reqs;
}
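
For illustration only: the ufs-mcq fix switches to spin_lock_irqsave() because the completion-queue lock can now be taken from contexts where interrupts may be either enabled or disabled; irqsave turns local interrupts off while the lock is held and restores the caller's previous state, which prevents a deadlock when the same lock is also taken from the interrupt handler on that CPU. A hypothetical sketch of the shared-lock pattern (demo_* names are invented):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_cq_lock);
static unsigned long demo_completed;

/* Hard-IRQ path: interrupts are already off here, so plain spin_lock() is enough. */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
	spin_lock(&demo_cq_lock);
	demo_completed++;
	spin_unlock(&demo_cq_lock);
	return IRQ_HANDLED;
}

/* Process or polling path: local interrupts must be off while the lock is held,
 * otherwise the handler above could interrupt this CPU and spin on the lock forever. */
static unsigned long demo_poll_completions(void)
{
	unsigned long flags, completed;

	spin_lock_irqsave(&demo_cq_lock, flags);
	completed = demo_completed;
	demo_completed = 0;
	spin_unlock_irqrestore(&demo_cq_lock, flags);
	return completed;
}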