Diffstat (limited to 'drivers')
215 files changed, 2414 insertions(+), 1396 deletions(-)
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index c31081e42cee..820d133236dd 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -97,6 +97,8 @@ struct dma_bridge_chan { * response queue's head and tail pointer of this DBC. */ void __iomem *dbc_base; + /* Synchronizes access to Request queue's head and tail pointer */ + struct mutex req_lock; /* Head of list where each node is a memory handle queued in request queue */ struct list_head xfer_list; /* Synchronizes DBC readers during cleanup */ diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index d8bdab69f800..b86a8e48e731 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -407,7 +407,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev, return -EINVAL; remaining = in_trans->size - resources->xferred_dma_size; if (remaining == 0) - return 0; + return -EINVAL; if (check_add_overflow(xfer_start_addr, remaining, &end)) return -EINVAL; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 797289e9d780..c4f117edb266 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -1356,13 +1356,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr goto release_ch_rcu; } + ret = mutex_lock_interruptible(&dbc->req_lock); + if (ret) + goto release_ch_rcu; + head = readl(dbc->dbc_base + REQHP_OFF); tail = readl(dbc->dbc_base + REQTP_OFF); if (head == U32_MAX || tail == U32_MAX) { /* PCI link error */ ret = -ENODEV; - goto release_ch_rcu; + goto unlock_req_lock; } queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail); @@ -1370,11 +1374,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc, head, &tail); if (ret) - goto release_ch_rcu; + goto unlock_req_lock; /* Finalize commit to hardware */ submit_ts = ktime_get_ns(); writel(tail, dbc->dbc_base + REQTP_OFF); + mutex_unlock(&dbc->req_lock); update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts, submit_ts, queue_level); @@ -1382,6 +1387,9 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr if (datapath_polling) schedule_work(&dbc->poll_work); +unlock_req_lock: + if (ret) + mutex_unlock(&dbc->req_lock); release_ch_rcu: srcu_read_unlock(&dbc->ch_lock, rcu_id); unlock_dev_srcu: diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c index a991b8198dc4..8dc4fe5bb560 100644 --- a/drivers/accel/qaic/qaic_debugfs.c +++ b/drivers/accel/qaic/qaic_debugfs.c @@ -218,6 +218,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d if (ret) goto destroy_workqueue; + dev_set_drvdata(&mhi_dev->dev, qdev); + qdev->bootlog_ch = mhi_dev; + for (i = 0; i < BOOTLOG_POOL_SIZE; i++) { msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL); if (!msg) { @@ -233,8 +236,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d goto mhi_unprepare; } - dev_set_drvdata(&mhi_dev->dev, qdev); - qdev->bootlog_ch = mhi_dev; return 0; mhi_unprepare: diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index e31bcb0ecfc9..e162f4b8a262 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -454,6 +454,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, return NULL; 
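The qaic changes above close a race where two submitters could read the same request-queue tail and overwrite each other's commit. A minimal sketch of the resulting pattern as a hypothetical helper (REQHP_OFF/REQTP_OFF and the DBC layout come from the driver; error handling condensed):

    /* Sketch: only one submitter may compute and publish a new tail. */
    static int qaic_commit_reqs(struct dma_bridge_chan *dbc, u32 nreqs)
    {
        u32 head, tail;
        int ret;

        ret = mutex_lock_interruptible(&dbc->req_lock);
        if (ret)
            return ret;                     /* interrupted by a signal */

        head = readl(dbc->dbc_base + REQHP_OFF);
        tail = readl(dbc->dbc_base + REQTP_OFF);
        if (head == U32_MAX || tail == U32_MAX) {
            ret = -ENODEV;                  /* PCI link went down */
            goto unlock;
        }

        /* ... queue nreqs entries, advancing the local tail copy ... */

        writel(tail, dbc->dbc_base + REQTP_OFF); /* single commit point */
    unlock:
        mutex_unlock(&dbc->req_lock);
        return ret;
    }

The lock is created with drmm_mutex_init() in the create_qdev() hunk that follows, so its lifetime is tied to the DRM device.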
init_waitqueue_head(&qdev->dbc[i].dbc_release); INIT_LIST_HEAD(&qdev->dbc[i].bo_lists); + ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock); + if (ret) + return NULL; } return qdev; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ff53f5f029b4..2a210719c4ce 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2174,13 +2174,10 @@ static int ata_read_log_directory(struct ata_device *dev) } version = get_unaligned_le16(&dev->gp_log_dir[0]); - if (version != 0x0001) { - ata_dev_err(dev, "Invalid log directory version 0x%04x\n", - version); - ata_clear_log_directory(dev); - dev->quirks |= ATA_QUIRK_NO_LOG_DIR; - return -EINVAL; - } + if (version != 0x0001) + ata_dev_warn_once(dev, + "Invalid log directory version 0x%04x\n", + version); return 0; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 053a086d547e..13ce229d450c 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -551,8 +551,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, return -EBADF; error = loop_check_backing_file(file); - if (error) + if (error) { + fput(file); return error; + } /* suppress uevents while reconfiguring the device */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); @@ -822,7 +824,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd) if (worker) goto queue_work; - worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN); + worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT); /* * In the event we cannot allocate a worker, just queue on the * rootcg worker and issue the I/O as the rootcg @@ -993,8 +995,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, return -EBADF; error = loop_check_backing_file(file); - if (error) + if (error) { + fput(file); return error; + } is_loop = is_loop_device(file); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index a0b67a35a5f0..3700ab4eba3e 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2301,8 +2301,11 @@ static int i_ipmi_request(struct ipmi_user *user, if (supplied_recv) { recv_msg = supplied_recv; recv_msg->user = user; - if (user) + if (user) { atomic_inc(&user->nr_msgs); + /* The put happens when the message is freed. 
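The ipmi_msghandler fix above pairs a reference acquire with the put that runs when the message is freed, so a supplied receive message can never outlive its user. Distilled shape of that ownership rule (the free side is a hypothetical helper; the real release path lives in ipmi_msghandler.c):

    /* Acquire: the message now owns a counted reference to its user. */
    recv_msg->user = user;
    if (user) {
        atomic_inc(&user->nr_msgs);
        kref_get(&user->refcount);
    }

    /* Release: freeing the message drops that reference again. */
    static void ipmi_free_recv_msg_sketch(struct ipmi_recv_msg *msg)
    {
        if (msg->user) {
            atomic_dec(&msg->user->nr_msgs);
            kref_put(&msg->user->refcount, free_ipmi_user); /* hypothetical release fn */
        }
        kfree(msg);
    }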
*/ + kref_get(&user->refcount); + } } else { recv_msg = ipmi_alloc_recv_msg(user); if (IS_ERR(recv_msg)) diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index ba3924eb13ba..8a8f692b6088 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -29,10 +29,11 @@ if TCG_TPM config TCG_TPM2_HMAC bool "Use HMAC and encrypted transactions on the TPM bus" - default X86_64 + default n select CRYPTO_ECDH select CRYPTO_LIB_AESCFB select CRYPTO_LIB_SHA256 + select CRYPTO_LIB_UTILS help Setting this causes us to deploy a scheme which uses request and response HMACs in addition to encryption for diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index b71725827743..c9f173001d0e 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -52,7 +52,7 @@ MODULE_PARM_DESC(suspend_pcr, unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { if (chip->flags & TPM_CHIP_FLAG_TPM2) - return tpm2_calc_ordinal_duration(chip, ordinal); + return tpm2_calc_ordinal_duration(ordinal); else return tpm1_calc_ordinal_duration(chip, ordinal); } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 7bb87fa5f7a1..2726bd38e5ac 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -299,7 +299,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip); int tpm2_auto_startup(struct tpm_chip *chip); void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); -unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); +unsigned long tpm2_calc_ordinal_duration(u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 524d802ede26..7d77f6fbc152 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -28,120 +28,57 @@ static struct tpm2_hash tpm2_hash_map[] = { int tpm2_get_timeouts(struct tpm_chip *chip) { - /* Fixed timeouts for TPM2 */ chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); - - /* PTP spec timeouts */ - chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT); - chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM); - chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG); - - /* Key creation commands long timeouts */ - chip->duration[TPM_LONG_LONG] = - msecs_to_jiffies(TPM2_DURATION_LONG_LONG); - chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; - return 0; } -/** - * tpm2_ordinal_duration_index() - returns an index to the chip duration table - * @ordinal: TPM command ordinal. - * - * The function returns an index to the chip duration table - * (enum tpm_duration), that describes the maximum amount of - * time the chip could take to return the result for a particular ordinal. - * - * The values of the MEDIUM, and LONG durations are taken - * from the PC Client Profile (PTP) specification (750, 2000 msec) - * - * LONG_LONG is for commands that generates keys which empirically takes - * a longer time on some systems. - * - * Return: - * * TPM_MEDIUM - * * TPM_LONG - * * TPM_LONG_LONG - * * TPM_UNDEFINED +/* + * Contains the maximum durations in milliseconds for TPM2 commands. 
*/ -static u8 tpm2_ordinal_duration_index(u32 ordinal) -{ - switch (ordinal) { - /* Startup */ - case TPM2_CC_STARTUP: /* 144 */ - return TPM_MEDIUM; - - case TPM2_CC_SELF_TEST: /* 143 */ - return TPM_LONG; - - case TPM2_CC_GET_RANDOM: /* 17B */ - return TPM_LONG; - - case TPM2_CC_SEQUENCE_UPDATE: /* 15C */ - return TPM_MEDIUM; - case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */ - return TPM_MEDIUM; - case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */ - return TPM_MEDIUM; - case TPM2_CC_HASH_SEQUENCE_START: /* 186 */ - return TPM_MEDIUM; - - case TPM2_CC_VERIFY_SIGNATURE: /* 177 */ - return TPM_LONG_LONG; - - case TPM2_CC_PCR_EXTEND: /* 182 */ - return TPM_MEDIUM; - - case TPM2_CC_HIERARCHY_CONTROL: /* 121 */ - return TPM_LONG; - case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */ - return TPM_LONG; - - case TPM2_CC_GET_CAPABILITY: /* 17A */ - return TPM_MEDIUM; - - case TPM2_CC_NV_READ: /* 14E */ - return TPM_LONG; - - case TPM2_CC_CREATE_PRIMARY: /* 131 */ - return TPM_LONG_LONG; - case TPM2_CC_CREATE: /* 153 */ - return TPM_LONG_LONG; - case TPM2_CC_CREATE_LOADED: /* 191 */ - return TPM_LONG_LONG; - - default: - return TPM_UNDEFINED; - } -} +static const struct { + unsigned long ordinal; + unsigned long duration; +} tpm2_ordinal_duration_map[] = { + {TPM2_CC_STARTUP, 750}, + {TPM2_CC_SELF_TEST, 3000}, + {TPM2_CC_GET_RANDOM, 2000}, + {TPM2_CC_SEQUENCE_UPDATE, 750}, + {TPM2_CC_SEQUENCE_COMPLETE, 750}, + {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750}, + {TPM2_CC_HASH_SEQUENCE_START, 750}, + {TPM2_CC_VERIFY_SIGNATURE, 30000}, + {TPM2_CC_PCR_EXTEND, 750}, + {TPM2_CC_HIERARCHY_CONTROL, 2000}, + {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000}, + {TPM2_CC_GET_CAPABILITY, 750}, + {TPM2_CC_NV_READ, 2000}, + {TPM2_CC_CREATE_PRIMARY, 30000}, + {TPM2_CC_CREATE, 30000}, + {TPM2_CC_CREATE_LOADED, 30000}, +}; /** - * tpm2_calc_ordinal_duration() - calculate the maximum command duration - * @chip: TPM chip to use. + * tpm2_calc_ordinal_duration() - Calculate the maximum command duration * @ordinal: TPM command ordinal. * - * The function returns the maximum amount of time the chip could take - * to return the result for a particular ordinal in jiffies. - * - * Return: A maximal duration time for an ordinal in jiffies. + * Returns the maximum amount of time the chip is expected by kernel to + * take in jiffies. 
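The table above replaces the switch-plus-duration-index scheme with a direct ordinal-to-milliseconds map; any ordinal not listed falls back to TPM2_DURATION_DEFAULT. A caller typically uses the result to bound a completion poll, roughly as follows (the status poll is hypothetical; tpm_msleep() and TPM_TIMEOUT_POLL are existing helpers in the TPM core's tpm.h):

    unsigned long stop = jiffies + tpm2_calc_ordinal_duration(TPM2_CC_CREATE);

    do {
        if (tpm_cmd_done(chip))          /* hypothetical status poll */
            return 0;
        tpm_msleep(TPM_TIMEOUT_POLL);
    } while (time_before(jiffies, stop));
    return -ETIME;                       /* chip exceeded its ordinal budget */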
*/ -unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) +unsigned long tpm2_calc_ordinal_duration(u32 ordinal) { - unsigned int index; + int i; - index = tpm2_ordinal_duration_index(ordinal); + for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++) + if (ordinal == tpm2_ordinal_duration_map[i].ordinal) + return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration); - if (index != TPM_UNDEFINED) - return chip->duration[index]; - else - return msecs_to_jiffies(TPM2_DURATION_DEFAULT); + return msecs_to_jiffies(TPM2_DURATION_DEFAULT); } - struct tpm2_pcr_read_out { __be32 update_cnt; __be32 pcr_selects_cnt; diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index bdb119453dfb..6d03c224e6b2 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -69,8 +69,8 @@ #include <linux/unaligned.h> #include <crypto/kpp.h> #include <crypto/ecdh.h> -#include <crypto/hash.h> -#include <crypto/hmac.h> +#include <crypto/sha2.h> +#include <crypto/utils.h> /* maximum number of names the TPM must remember for authorization */ #define AUTH_MAX_NAMES 3 @@ -385,51 +385,6 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, u32 *handle, u8 *name); /* - * It turns out the crypto hmac(sha256) is hard for us to consume - * because it assumes a fixed key and the TPM seems to change the key - * on every operation, so we weld the hmac init and final functions in - * here to give it the same usage characteristics as a regular hash - */ -static void tpm2_hmac_init(struct sha256_ctx *sctx, u8 *key, u32 key_len) -{ - u8 pad[SHA256_BLOCK_SIZE]; - int i; - - sha256_init(sctx); - for (i = 0; i < sizeof(pad); i++) { - if (i < key_len) - pad[i] = key[i]; - else - pad[i] = 0; - pad[i] ^= HMAC_IPAD_VALUE; - } - sha256_update(sctx, pad, sizeof(pad)); -} - -static void tpm2_hmac_final(struct sha256_ctx *sctx, u8 *key, u32 key_len, - u8 *out) -{ - u8 pad[SHA256_BLOCK_SIZE]; - int i; - - for (i = 0; i < sizeof(pad); i++) { - if (i < key_len) - pad[i] = key[i]; - else - pad[i] = 0; - pad[i] ^= HMAC_OPAD_VALUE; - } - - /* collect the final hash; use out as temporary storage */ - sha256_final(sctx, out); - - sha256_init(sctx); - sha256_update(sctx, pad, sizeof(pad)); - sha256_update(sctx, out, SHA256_DIGEST_SIZE); - sha256_final(sctx, out); -} - -/* * assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but * otherwise standard tpm2_KDFa. Note output is in bytes not bits. 
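With the library HMAC in place, one tpm2_KDFa() iteration collapses to plain init/update/final calls keyed directly on the session secret; no ipad/opad bookkeeping survives in the caller. Consolidated from the fragments above (counter and out advance once per SHA256_DIGEST_SIZE block):

    struct hmac_sha256_ctx hctx;
    __be32 c = cpu_to_be32(counter);
    __be32 bits = cpu_to_be32(bytes * 8);

    hmac_sha256_init_usingrawkey(&hctx, key, key_len);
    hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c));
    hmac_sha256_update(&hctx, label, strlen(label) + 1);  /* label incl. NUL */
    hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
    hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
    hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits));
    hmac_sha256_final(&hctx, out);                        /* 32 bytes out */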
*/ @@ -440,16 +395,16 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u, const __be32 bits = cpu_to_be32(bytes * 8); while (bytes > 0) { - struct sha256_ctx sctx; + struct hmac_sha256_ctx hctx; __be32 c = cpu_to_be32(counter); - tpm2_hmac_init(&sctx, key, key_len); - sha256_update(&sctx, (u8 *)&c, sizeof(c)); - sha256_update(&sctx, label, strlen(label)+1); - sha256_update(&sctx, u, SHA256_DIGEST_SIZE); - sha256_update(&sctx, v, SHA256_DIGEST_SIZE); - sha256_update(&sctx, (u8 *)&bits, sizeof(bits)); - tpm2_hmac_final(&sctx, key, key_len, out); + hmac_sha256_init_usingrawkey(&hctx, key, key_len); + hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c)); + hmac_sha256_update(&hctx, label, strlen(label) + 1); + hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE); + hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE); + hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits)); + hmac_sha256_final(&hctx, out); bytes -= SHA256_DIGEST_SIZE; counter++; @@ -593,6 +548,7 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) u32 attrs; u8 cphash[SHA256_DIGEST_SIZE]; struct sha256_ctx sctx; + struct hmac_sha256_ctx hctx; if (!auth) return; @@ -704,14 +660,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) sha256_final(&sctx, cphash); /* now calculate the hmac */ - tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key) - + auth->passphrase_len); - sha256_update(&sctx, cphash, sizeof(cphash)); - sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce)); - sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); - sha256_update(&sctx, &auth->attrs, 1); - tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key) - + auth->passphrase_len, hmac); + hmac_sha256_init_usingrawkey(&hctx, auth->session_key, + sizeof(auth->session_key) + + auth->passphrase_len); + hmac_sha256_update(&hctx, cphash, sizeof(cphash)); + hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce)); + hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + hmac_sha256_update(&hctx, &auth->attrs, 1); + hmac_sha256_final(&hctx, hmac); } EXPORT_SYMBOL(tpm_buf_fill_hmac_session); @@ -751,6 +707,7 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, u8 rphash[SHA256_DIGEST_SIZE]; u32 attrs, cc; struct sha256_ctx sctx; + struct hmac_sha256_ctx hctx; u16 tag = be16_to_cpu(head->tag); int parm_len, len, i, handles; @@ -820,21 +777,20 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, sha256_final(&sctx, rphash); /* now calculate the hmac */ - tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key) - + auth->passphrase_len); - sha256_update(&sctx, rphash, sizeof(rphash)); - sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); - sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce)); - sha256_update(&sctx, &auth->attrs, 1); + hmac_sha256_init_usingrawkey(&hctx, auth->session_key, + sizeof(auth->session_key) + + auth->passphrase_len); + hmac_sha256_update(&hctx, rphash, sizeof(rphash)); + hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce)); + hmac_sha256_update(&hctx, &auth->attrs, 1); /* we're done with the rphash, so put our idea of the hmac there */ - tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key) - + auth->passphrase_len, rphash); - if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) { - rc = 0; - } else { + 
hmac_sha256_final(&hctx, rphash); + if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) { dev_err(&chip->dev, "TPM: HMAC check failed\n"); goto out; } + rc = 0; /* now do response decryption */ if (auth->attrs & TPM2_SA_ENCRYPT) { diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index ed97344f2324..c75a531cfb98 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -133,8 +133,7 @@ static inline bool tpm_crb_has_idle(u32 start_method) { return !(start_method == ACPI_TPM2_START_METHOD || start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD || - start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC || - start_method == ACPI_TPM2_CRB_WITH_ARM_FFA); + start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); } static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, @@ -191,7 +190,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete * * Return: 0 always */ -static int __crb_go_idle(struct device *dev, struct crb_priv *priv) +static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc) { int rc; @@ -200,6 +199,12 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv) iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc); + if (rc) + return rc; + } + rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; @@ -220,7 +225,7 @@ static int crb_go_idle(struct tpm_chip *chip) struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); - return __crb_go_idle(dev, priv); + return __crb_go_idle(dev, priv, chip->locality); } /** @@ -238,7 +243,7 @@ static int crb_go_idle(struct tpm_chip *chip) * * Return: 0 on success -ETIME on timeout; */ -static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) +static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc) { int rc; @@ -247,6 +252,12 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req); + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc); + if (rc) + return rc; + } + rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; @@ -267,7 +278,7 @@ static int crb_cmd_ready(struct tpm_chip *chip) struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); - return __crb_cmd_ready(dev, priv); + return __crb_cmd_ready(dev, priv, chip->locality); } static int __crb_request_locality(struct device *dev, @@ -444,7 +455,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len) /* Seems to be necessary for every command */ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) - __crb_cmd_ready(&chip->dev, priv); + __crb_cmd_ready(&chip->dev, priv, chip->locality); memcpy_toio(priv->cmd, buf, len); @@ -672,7 +683,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * PTT HW bug w/a: wake up the device to access * possibly not retained registers. 
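Note the verification switch above from memcmp() to crypto_memneq(): memcmp() returns at the first differing byte, so its timing can leak how much of a MAC prefix matched, while crypto_memneq() always touches every byte. The safe idiom for comparing any secret:

    #include <crypto/utils.h>

    /* Constant-time check; never memcmp() MACs or other secrets. */
    if (crypto_memneq(computed_hmac, received_hmac, SHA256_DIGEST_SIZE))
        return -EBADMSG;   /* assumed errno; the patch logs and jumps to out */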
*/ - ret = __crb_cmd_ready(dev, priv); + ret = __crb_cmd_ready(dev, priv, 0); if (ret) goto out_relinquish_locality; @@ -744,7 +755,7 @@ out: if (!ret) priv->cmd_size = cmd_size; - __crb_go_idle(dev, priv); + __crb_go_idle(dev, priv, 0); out_relinquish_locality: diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index d53fce1c9d6f..c9793a3d986d 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid = GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4, 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53); +static const char * const tpm_ppi_info[] = { + "Not implemented", + "BIOS only", + "Blocked for OS by system firmware", + "User required", + "User not required", +}; + +/* A spinlock to protect access to the cache from concurrent reads */ +static DEFINE_MUTEX(tpm_ppi_lock); + +static u32 ppi_operations_cache[PPI_VS_REQ_END + 1]; +static bool ppi_cache_populated; + static bool tpm_ppi_req_has_parameter(u64 req) { return req == 23; @@ -277,8 +291,7 @@ cleanup: return status; } -static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, - u32 end) +static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf) { int i; u32 ret; @@ -286,34 +299,22 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, union acpi_object *obj, tmp; union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp); - static char *info[] = { - "Not implemented", - "BIOS only", - "Blocked for OS by BIOS", - "User required", - "User not required", - }; - if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_GETOPR)) return -EPERM; tmp.integer.type = ACPI_TYPE_INTEGER; - for (i = start; i <= end; i++) { + for (i = 0; i <= PPI_VS_REQ_END; i++) { tmp.integer.value = i; obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR, ACPI_TYPE_INTEGER, &argv, TPM_PPI_REVISION_ID_1); - if (!obj) { + if (!obj) return -ENOMEM; - } else { - ret = obj->integer.value; - ACPI_FREE(obj); - } - if (ret > 0 && ret < ARRAY_SIZE(info)) - len += sysfs_emit_at(buf, len, "%d %d: %s\n", - i, ret, info[ret]); + ret = obj->integer.value; + ppi_operations_cache[i] = ret; + ACPI_FREE(obj); } return len; @@ -324,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t len = 0; + u32 ret; + int i; + + mutex_lock(&tpm_ppi_lock); + if (!ppi_cache_populated) { + len = cache_ppi_operations(chip->acpi_dev_handle, buf); + if (len < 0) { + mutex_unlock(&tpm_ppi_lock); + return len; + } - return show_ppi_operations(chip->acpi_dev_handle, buf, 0, - PPI_TPM_REQ_MAX); + ppi_cache_populated = true; + } + + for (i = 0; i <= PPI_TPM_REQ_MAX; i++) { + ret = ppi_operations_cache[i]; + if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info)) + len += sysfs_emit_at(buf, len, "%d %d: %s\n", + i, ret, tpm_ppi_info[ret]); + } + mutex_unlock(&tpm_ppi_lock); + + return len; } static ssize_t tpm_show_ppi_vs_operations(struct device *dev, @@ -334,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t len = 0; + u32 ret; + int i; - return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START, - PPI_VS_REQ_END); + mutex_lock(&tpm_ppi_lock); + if (!ppi_cache_populated) { + len = cache_ppi_operations(chip->acpi_dev_handle, buf); + if (len < 0) { + mutex_unlock(&tpm_ppi_lock); + return len; + } + + ppi_cache_populated = true; + } + + for (i = PPI_VS_REQ_START; i <= 
PPI_VS_REQ_END; i++) { + ret = ppi_operations_cache[i]; + if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info)) + len += sysfs_emit_at(buf, len, "%d %d: %s\n", + i, ret, tpm_ppi_info[ret]); + } + mutex_unlock(&tpm_ppi_lock); + + return len; } static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 4b12c4b9da8b..8954a8660ffc 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -978,8 +978,8 @@ restore_irqs: * will call disable_irq which undoes all of the above. */ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { - tpm_tis_write8(priv, original_int_vec, - TPM_INT_VECTOR(priv->locality)); + tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), + original_int_vec); rc = -1; } diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index d7a5539d07d4..bd2e282ca93a 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -348,7 +348,7 @@ static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd) struct resource res; int nid, rc; - res = DEFINE_RES(start, size, 0); + res = DEFINE_RES_MEM(start, size); nid = phys_to_target_node(start); rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size); diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c index 7c750599ea69..4bc484b46f43 100644 --- a/drivers/cxl/core/features.c +++ b/drivers/cxl/core/features.c @@ -371,6 +371,9 @@ cxl_feature_info(struct cxl_features_state *cxlfs, { struct cxl_feat_entry *feat; + if (!cxlfs || !cxlfs->entries) + return ERR_PTR(-EOPNOTSUPP); + for (int i = 0; i < cxlfs->entries->num_features; i++) { feat = &cxlfs->entries->ent[i]; if (uuid_equal(uuid, &feat->uuid)) diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index d5f71eb1ade8..8128fd2b5b31 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1182,6 +1182,20 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, if (rc) return ERR_PTR(rc); + /* + * Setup port register if this is the first dport showed up. Having + * a dport also means that there is at least 1 active link. + */ + if (port->nr_dports == 1 && + port->component_reg_phys != CXL_RESOURCE_NONE) { + rc = cxl_port_setup_regs(port, port->component_reg_phys); + if (rc) { + xa_erase(&port->dports, (unsigned long)dport->dport_dev); + return ERR_PTR(rc); + } + port->component_reg_phys = CXL_RESOURCE_NONE; + } + get_device(dport_dev); rc = devm_add_action_or_reset(host, cxl_dport_remove, dport); if (rc) @@ -1200,18 +1214,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, cxl_debugfs_create_dport_dir(dport); - /* - * Setup port register if this is the first dport showed up. Having - * a dport also means that there is at least 1 active link. 
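Both tpm_ppi show() handlers above now follow the same populate-once pattern: take tpm_ppi_lock, fill ppi_operations_cache[] with a single ACPI _DSM walk if needed, then emit from the cache. A hypothetical consolidation using the scoped guard() helper from <linux/cleanup.h> (which this series already uses elsewhere, e.g. guard(rwsem_read) in cxl/core/region.c) would drop the manual unlock branches:

    static ssize_t tpm_show_ppi_range(struct tpm_chip *chip, char *buf,
                                      u32 first, u32 last)
    {
        ssize_t len = 0;
        u32 i, op;

        guard(mutex)(&tpm_ppi_lock);    /* unlocked on every return */

        if (!ppi_cache_populated) {
            ssize_t ret = cache_ppi_operations(chip->acpi_dev_handle, buf);

            if (ret < 0)
                return ret;
            ppi_cache_populated = true;
        }

        for (i = first; i <= last; i++) {
            op = ppi_operations_cache[i];
            if (op < ARRAY_SIZE(tpm_ppi_info))
                len += sysfs_emit_at(buf, len, "%d %d: %s\n",
                                     i, op, tpm_ppi_info[op]);
        }
        return len;
    }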
- */ - if (port->nr_dports == 1 && - port->component_reg_phys != CXL_RESOURCE_NONE) { - rc = cxl_port_setup_regs(port, port->component_reg_phys); - if (rc) - return ERR_PTR(rc); - port->component_reg_phys = CXL_RESOURCE_NONE; - } - return dport; } diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index e14c1d305b22..b06fee1978ba 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -839,7 +839,7 @@ static int match_free_decoder(struct device *dev, const void *data) } static bool region_res_match_cxl_range(const struct cxl_region_params *p, - struct range *range) + const struct range *range) { if (!p->res) return false; @@ -3398,10 +3398,7 @@ static int match_region_by_range(struct device *dev, const void *data) p = &cxlr->params; guard(rwsem_read)(&cxl_rwsem.region); - if (p->res && p->res->start == r->start && p->res->end == r->end) - return 1; - - return 0; + return region_res_match_cxl_range(p, r); } static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr, @@ -3666,14 +3663,14 @@ static int validate_region_offset(struct cxl_region *cxlr, u64 offset) if (offset < p->cache_size) { dev_err(&cxlr->dev, - "Offset %#llx is within extended linear cache %pr\n", + "Offset %#llx is within extended linear cache %pa\n", offset, &p->cache_size); return -EINVAL; } region_size = resource_size(p->res); if (offset >= region_size) { - dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pr\n", + dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pa\n", offset, ®ion_size); return -EINVAL; } diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index a53ec4798b12..a972e4ef1936 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -1068,7 +1068,7 @@ TRACE_EVENT(cxl_poison, __entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd, __entry->dpa); if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size) - __entry->hpa_alias0 = __entry->hpa + + __entry->hpa_alias0 = __entry->hpa - cxlr->params.cache_size; else __entry->hpa_alias0 = ULLONG_MAX; diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 5a2c9badcc64..5e6e7e900bda 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -20,6 +20,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/bitfield.h> +#include <linux/hw_bitfield.h> #include <linux/bits.h> #include <linux/perf_event.h> @@ -30,8 +31,6 @@ #define DMC_MAX_CHANNELS 4 -#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) - /* DDRMON_CTRL */ #define DDRMON_CTRL 0x04 #define DDRMON_CTRL_LPDDR5 BIT(6) @@ -41,10 +40,6 @@ #define DDRMON_CTRL_LPDDR23 BIT(2) #define DDRMON_CTRL_SOFTWARE_EN BIT(1) #define DDRMON_CTRL_TIMER_CNT_EN BIT(0) -#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_LPDDR5 | \ - DDRMON_CTRL_DDR4 | \ - DDRMON_CTRL_LPDDR4 | \ - DDRMON_CTRL_LPDDR23) #define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7) #define DDRMON_CH0_WR_NUM 0x20 @@ -124,27 +119,31 @@ struct rockchip_dfi { unsigned int count_multiplier; /* number of data clocks per count */ }; -static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl, - u32 *mask) +static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl) { u32 ddrmon_ver; - *mask = DDRMON_CTRL_DDR_TYPE_MASK; - switch (dfi->ddr_type) { case ROCKCHIP_DDRTYPE_LPDDR2: case ROCKCHIP_DDRTYPE_LPDDR3: - *ctrl = DDRMON_CTRL_LPDDR23; + *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 1) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0); break; 
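The rockchip-dfi conversion in this hunk targets "hiword-mask" registers, where the upper 16 bits of a 32-bit write are a per-bit write-enable for the lower 16: bits whose mask is clear keep their old value, so no read-modify-write is needed. FIELD_PREP_WM16() from <linux/hw_bitfield.h> (included above) generates the value and its mask together, replacing the driver's removed open-coded macro:

    #define HIWORD_UPDATE(val, mask)  ((val) | (mask) << 16)

    /* Equivalent ways to set SOFTWARE_EN (bit 1) while leaving the
     * other control bits in the same register untouched: */
    writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN),
                   dfi_regs + DDRMON_CTRL);
    writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 1),
                   dfi_regs + DDRMON_CTRL);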
case ROCKCHIP_DDRTYPE_LPDDR4: case ROCKCHIP_DDRTYPE_LPDDR4X: - *ctrl = DDRMON_CTRL_LPDDR4; + *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 1) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0); break; case ROCKCHIP_DDRTYPE_LPDDR5: ddrmon_ver = readl_relaxed(dfi->regs); if (ddrmon_ver < 0x40) { - *ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode; - *mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK; + *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 1) | + FIELD_PREP_WM16(DDRMON_CTRL_LP5_BANK_MODE_MASK, + dfi->lp5_bank_mode); break; } @@ -172,7 +171,6 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) void __iomem *dfi_regs = dfi->regs; int i, ret = 0; u32 ctrl; - u32 ctrl_mask; mutex_lock(&dfi->mutex); @@ -186,7 +184,7 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) goto out; } - ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask); + ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl); if (ret) goto out; @@ -196,15 +194,16 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) continue; /* clear DDRMON_CTRL setting */ - writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN | - DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN), + writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_TIMER_CNT_EN, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_HARDWARE_EN, 0), dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); - writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask), - dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + writel_relaxed(ctrl, dfi_regs + i * dfi->ddrmon_stride + + DDRMON_CTRL); /* enable count, use software mode */ - writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN), + writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 1), dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); if (dfi->ddrmon_ctrl_single) @@ -234,8 +233,8 @@ static void rockchip_dfi_disable(struct rockchip_dfi *dfi) if (!(dfi->channel_mask & BIT(i))) continue; - writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN), - dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0), + dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); if (dfi->ddrmon_ctrl_single) break; diff --git a/drivers/gpio/gpio-usbio.c b/drivers/gpio/gpio-usbio.c index e13c120824e3..34d42c743d5b 100644 --- a/drivers/gpio/gpio-usbio.c +++ b/drivers/gpio/gpio-usbio.c @@ -29,6 +29,7 @@ static const struct acpi_device_id usbio_gpio_acpi_hids[] = { { "INTC1007" }, /* MTL */ { "INTC10B2" }, /* ARL */ { "INTC10B5" }, /* LNL */ + { "INTC10D1" }, /* MTL-CVF */ { "INTC10E2" }, /* PTL */ { } }; diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c index 4af504c23e6f..572b85e77370 100644 --- a/drivers/gpio/gpio-wcd934x.c +++ b/drivers/gpio/gpio-wcd934x.c @@ -103,7 +103,7 @@ static int wcd_gpio_probe(struct platform_device *pdev) chip->base = -1; chip->ngpio = WCD934X_NPINS; chip->label = dev_name(dev); - chip->can_sleep = false; + chip->can_sleep = true; return devm_gpiochip_add_data(dev, chip, data); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2a0df4cabb99..6f5b4a0e0a34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1290,6 +1290,7 @@ struct amdgpu_device { bool debug_disable_gpu_ring_reset; bool debug_vm_userptr; bool debug_disable_ce_logs; + bool debug_enable_ce_cs; /* Protection for the following isolation 
structure */ struct mutex enforce_isolation_mutex; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7c54fe6b0f5d..a2ca9acf8c4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2329,10 +2329,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, struct kfd_vm_fault_info *mem) { - if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { + if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) { *mem = *adev->gmc.vm_fault_info; - mb(); /* make sure read happened */ - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); } return 0; } @@ -2586,12 +2585,17 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, * from the KFD, trigger a segmentation fault in VM debug mode. */ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) { + struct kfd_process *p; + pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n", pid_nr(process_info->pid), mem->va); // Send GPU VM fault to user space - kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid), - mem->va); + p = kfd_lookup_process_by_pid(process_info->pid); + if (p) { + kfd_signal_vm_fault_event_with_userptr(p, mem->va); + kfd_unref_process(p); + } } ret = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9cd7741d2254..2f6a96af7fb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -364,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p, if (p->uf_bo && ring->funcs->no_user_fence) return -EINVAL; + if (!p->adev->debug_enable_ce_cs && + chunk_ib->flags & AMDGPU_IB_FLAG_CE) { + dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n"); + return -EINVAL; + } + if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) { if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) @@ -702,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, */ const s64 us_upper_bound = 200000; - if (!adev->mm_stats.log2_max_MBps) { + if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) { *max_bytes = 0; *max_vis_bytes = 0; return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a77000c2e0bb..3d032c4e2dce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1882,6 +1882,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev) { + /* Enabling ASPM causes randoms hangs on Tahiti and Oland on Zen4. + * It's unclear if this is a platform-specific or GPU-specific issue. + * Disable ASPM on SI for the time being. 
+ */ + if (adev->family == AMDGPU_FAMILY_SI) + return true; + #if IS_ENABLED(CONFIG_X86) struct cpuinfo_x86 *c = &cpu_data(0); @@ -6389,23 +6396,28 @@ static int amdgpu_device_sched_resume(struct list_head *device_list, if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); - if (tmp_adev->asic_reset_res) - r = tmp_adev->asic_reset_res; - - tmp_adev->asic_reset_res = 0; - - if (r) { + if (tmp_adev->asic_reset_res) { /* bad news, how to tell it to userspace ? * for ras error, we should report GPU bad status instead of * reset failure */ if (reset_context->src != AMDGPU_RESET_SRC_RAS || !amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) - dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", - atomic_read(&tmp_adev->gpu_reset_counter)); - amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); + dev_info( + tmp_adev->dev, + "GPU reset(%d) failed with error %d \n", + atomic_read( + &tmp_adev->gpu_reset_counter), + tmp_adev->asic_reset_res); + amdgpu_vf_error_put(tmp_adev, + AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, + tmp_adev->asic_reset_res); + if (!r) + r = tmp_adev->asic_reset_res; + tmp_adev->asic_reset_res = 0; } else { - dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); + dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", + atomic_read(&tmp_adev->gpu_reset_counter)); if (amdgpu_acpi_smart_shift_update(tmp_adev, AMDGPU_SS_DEV_D0)) dev_warn(tmp_adev->dev, @@ -7157,28 +7169,35 @@ void amdgpu_pci_resume(struct pci_dev *pdev) static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev) { - struct pci_dev *parent = pci_upstream_bridge(adev->pdev); + struct pci_dev *swus, *swds; int r; - if (!parent || parent->vendor != PCI_VENDOR_ID_ATI) + swds = pci_upstream_bridge(adev->pdev); + if (!swds || swds->vendor != PCI_VENDOR_ID_ATI || + pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM) + return; + swus = pci_upstream_bridge(swds); + if (!swus || + (swus->vendor != PCI_VENDOR_ID_ATI && + swus->vendor != PCI_VENDOR_ID_AMD) || + pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM) return; /* If already saved, return */ if (adev->pcie_reset_ctx.swus) return; /* Upstream bridge is ATI, assume it's SWUS/DS architecture */ - r = pci_save_state(parent); + r = pci_save_state(swds); if (r) return; - adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(parent); + adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds); - parent = pci_upstream_bridge(parent); - r = pci_save_state(parent); + r = pci_save_state(swus); if (r) return; - adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(parent); + adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus); - adev->pcie_reset_ctx.swus = parent; + adev->pcie_reset_ctx.swus = swus; } static void amdgpu_device_load_switch_state(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 73401f0aeb34..dd7b2b796427 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1033,7 +1033,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, /* Until a uniform way is figured, get mask based on hwid */ switch (hw_id) { case VCN_HWID: - harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; + /* VCN vs UVD+VCE */ + if (!amdgpu_ip_version(adev, VCE_HWIP, 0)) + harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; break; case DMU_HWID: if (adev->harvest_ip_mask & 
AMD_HARVEST_IP_DMU_MASK) @@ -2565,7 +2567,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); @@ -2592,7 +2596,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); @@ -2619,8 +2625,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 1; + adev->sdma.sdma_mask = 1; adev->vcn.num_vcn_inst = 1; adev->gmc.num_umc = 2; + adev->gfx.xcc_mask = 1; if (adev->apu_flags & AMD_APU_IS_RAVEN2) { adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); @@ -2665,7 +2673,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega20_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 8; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); @@ -2693,8 +2703,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); arct_reg_base_init(adev); adev->sdma.num_instances = 8; + adev->sdma.sdma_mask = 0xff; adev->vcn.num_vcn_inst = 2; adev->gmc.num_umc = 8; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); @@ -2726,8 +2738,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); aldebaran_reg_base_init(adev); adev->sdma.num_instances = 5; + adev->sdma.sdma_mask = 0x1f; adev->vcn.num_vcn_inst = 2; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); @@ -2762,6 +2776,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) } else { cyan_skillfish_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index bff25ef3e2d0..61268aa82df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -144,7 +144,8 @@ enum AMDGPU_DEBUG_MASK { AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6), AMDGPU_DEBUG_SMU_POOL = BIT(7), AMDGPU_DEBUG_VM_USERPTR = BIT(8), - AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9) + AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9), + AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10) }; unsigned int amdgpu_vram_limit = UINT_MAX; @@ 
-2289,6 +2290,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev) pr_info("debug: disable kernel logs of correctable errors\n"); adev->debug_disable_ce_logs = true; } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) { + pr_info("debug: allowing command submission to CE engine\n"); + adev->debug_enable_ce_cs = true; + } } static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index fd8cca241da6..18a7829122d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -758,11 +758,42 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) * @fence: fence of the ring to signal * */ -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence) +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af) { - dma_fence_set_error(&fence->base, -ETIME); - amdgpu_fence_write(fence->ring, fence->seq); - amdgpu_fence_process(fence->ring); + struct dma_fence *unprocessed; + struct dma_fence __rcu **ptr; + struct amdgpu_fence *fence; + struct amdgpu_ring *ring = af->ring; + unsigned long flags; + u32 seq, last_seq; + + last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; + seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; + + /* mark all fences from the guilty context with an error */ + spin_lock_irqsave(&ring->fence_drv.lock, flags); + do { + last_seq++; + last_seq &= ring->fence_drv.num_fences_mask; + + ptr = &ring->fence_drv.fences[last_seq]; + rcu_read_lock(); + unprocessed = rcu_dereference(*ptr); + + if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) { + fence = container_of(unprocessed, struct amdgpu_fence, base); + + if (fence == af) + dma_fence_set_error(&fence->base, -ETIME); + else if (fence->context == af->context) + dma_fence_set_error(&fence->base, -ECANCELED); + } + rcu_read_unlock(); + } while (last_seq != seq); + spin_unlock_irqrestore(&ring->fence_drv.lock, flags); + /* signal the guilty fence */ + amdgpu_fence_write(ring, af->seq); + amdgpu_fence_process(ring); } void amdgpu_fence_save_wptr(struct dma_fence *fence) @@ -790,14 +821,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, struct dma_fence *unprocessed; struct dma_fence __rcu **ptr; struct amdgpu_fence *fence; - u64 wptr, i, seqno; + u64 wptr; + u32 seq, last_seq; - seqno = amdgpu_fence_read(ring); + last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; + seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; wptr = ring->fence_drv.signalled_wptr; ring->ring_backup_entries_to_copy = 0; - for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { - ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; + do { + last_seq++; + last_seq &= ring->fence_drv.num_fences_mask; + + ptr = &ring->fence_drv.fences[last_seq]; rcu_read_lock(); unprocessed = rcu_dereference(*ptr); @@ -813,7 +849,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, wptr = fence->wptr; } rcu_read_unlock(); - } + } while (last_seq != seq); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a09ccf7d8aa2..ebe2b4c68b0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1102,6 +1102,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_ might_sleep(); while 
(r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + if (amdgpu_in_reset(adev)) + goto failed_kiq_read; + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); } @@ -1171,6 +1174,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3 might_sleep(); while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + if (amdgpu_in_reset(adev)) + goto failed_kiq_write; msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6b7d66b6d4cc..63ee6ba6a931 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -371,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val) for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; - if (val & (BIT_ULL(1) << ((i * adev->jpeg.num_jpeg_rings) + j))) + if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j))) ring->sched.ready = true; else ring->sched.ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 8676400834fc..b3e6b3fcdf2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -758,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); + ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? + ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0; return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); @@ -804,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) mem.vram.usable_heap_size = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size) - AMDGPU_VM_RESERVED_VRAM; - mem.vram.heap_usage = - ttm_resource_manager_usage(vram_man); + mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? 
+ ttm_resource_manager_usage(vram_man) : 0; mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -1421,14 +1422,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) amdgpu_debugfs_vm_init(file_priv); - r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id); + r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid); if (r) goto error_pasid; - r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid); - if (r) - goto error_vm; - fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL); if (!fpriv->prt_va) { r = -ENOMEM; @@ -1468,10 +1465,8 @@ error_vm: amdgpu_vm_fini(adev, &fpriv->vm); error_pasid: - if (pasid) { + if (pasid) amdgpu_pasid_free(pasid); - amdgpu_vm_set_pasid(adev, &fpriv->vm, 0); - } kfree(fpriv); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 5bf9be073cdd..4883adcfbb4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -409,7 +409,7 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev, return -EINVAL; /* Clear the doorbell array before detection */ - memset(adev->mes.hung_queue_db_array_cpu_addr, 0, + memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET, adev->mes.hung_queue_db_array_size * sizeof(u32)); input.queue_type = queue_type; input.detect_only = detect_only; @@ -420,12 +420,17 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev, dev_err(adev->dev, "failed to detect and reset\n"); } else { *hung_db_num = 0; - for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) { + for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) { if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) { hung_db_array[i] = db_array[i]; *hung_db_num += 1; } } + + /* + * TODO: return HQD info for MES scheduled user compute queue reset cases + * stored in hung_db_array hqd info offset to full array size + */ } return r; @@ -686,14 +691,11 @@ out: bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) { uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; - bool is_supported = false; - - if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && - amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && - mes_rev >= 0x63) - is_supported = true; - return is_supported; + return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && + amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && + mes_rev >= 0x63) || + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0)); } /* Fix me -- node_id is used to identify the correct MES instances in the future */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 6b506fc72f58..97c137c90f97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -149,6 +149,7 @@ struct amdgpu_mes { void *resource_1_addr[AMDGPU_MAX_MES_PIPES]; int hung_queue_db_array_size; + int hung_queue_hqd_info_offset; struct amdgpu_bo *hung_queue_db_array_gpu_obj; uint64_t hung_queue_db_array_gpu_addr; void *hung_queue_db_array_cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 1578e4e2bf84..8c0e5d03de50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2352,7 +2352,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp) } ret = psp_ta_load(psp, 
&psp->securedisplay_context.context); - if (!ret) { + if (!ret && !psp->securedisplay_context.context.resp_status) { psp->securedisplay_context.context.initialized = true; mutex_init(&psp->securedisplay_context.mutex); } else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 8f6ce948c684..5ec5c3ff22bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -811,7 +811,7 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, if (r) return r; - /* signal the fence of the bad job */ + /* signal the guilty fence and set an error on all fences from the context */ if (guilty_fence) amdgpu_fence_driver_guilty_force_completion(guilty_fence); /* Re-emit the non-guilty commands */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index b6b649179776..4b46e3c26ff3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -155,7 +155,7 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops; void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence); +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af); void amdgpu_fence_save_wptr(struct dma_fence *fence); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 48e0932f5b62..1add21160d21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -726,12 +726,12 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec, struct amdgpu_bo *bo; int ret; - spin_lock(&vm->invalidated_lock); + spin_lock(&vm->status_lock); while (!list_empty(&vm->invalidated)) { bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, base.vm_status); - spin_unlock(&vm->invalidated_lock); + spin_unlock(&vm->status_lock); bo = bo_va->base.bo; ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2); @@ -748,9 +748,9 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec, if (ret) return ret; - spin_lock(&vm->invalidated_lock); + spin_lock(&vm->status_lock); } - spin_unlock(&vm->invalidated_lock); + spin_unlock(&vm->status_lock); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3328ab63376b..f96beb96c75c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -598,8 +598,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->driver_cert = 0; vf2pf_info->os_info.all = 0; - vf2pf_info->fb_usage = - ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20; + vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? 
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0; vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20; vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8c28e8923f02..c1a801203949 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -139,48 +139,6 @@ static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm) } /** - * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping - * - * @adev: amdgpu_device pointer - * @vm: amdgpu_vm pointer - * @pasid: the pasid the VM is using on this GPU - * - * Set the pasid this VM is using on this GPU, can also be used to remove the - * pasid by passing in zero. - * - */ -int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, - u32 pasid) -{ - int r; - - amdgpu_vm_assert_locked(vm); - - if (vm->pasid == pasid) - return 0; - - if (vm->pasid) { - r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); - if (r < 0) - return r; - - vm->pasid = 0; - } - - if (pasid) { - r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, - GFP_KERNEL)); - if (r < 0) - return r; - - vm->pasid = pasid; - } - - - return 0; -} - -/** * amdgpu_vm_bo_evicted - vm_bo is evicted * * @vm_bo: vm_bo which is evicted @@ -195,10 +153,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) vm_bo->moved = true; amdgpu_vm_assert_locked(vm); + spin_lock(&vm_bo->vm->status_lock); if (bo->tbo.type == ttm_bo_type_kernel) list_move(&vm_bo->vm_status, &vm->evicted); else list_move_tail(&vm_bo->vm_status, &vm->evicted); + spin_unlock(&vm_bo->vm->status_lock); } /** * amdgpu_vm_bo_moved - vm_bo is moved @@ -211,7 +171,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) { amdgpu_vm_assert_locked(vm_bo->vm); + spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->moved); + spin_unlock(&vm_bo->vm->status_lock); } /** @@ -225,7 +187,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) { amdgpu_vm_assert_locked(vm_bo->vm); + spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->idle); + spin_unlock(&vm_bo->vm->status_lock); vm_bo->moved = false; } @@ -239,9 +203,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->invalidated_lock); + spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); - spin_unlock(&vm_bo->vm->invalidated_lock); + spin_unlock(&vm_bo->vm->status_lock); } /** @@ -254,9 +218,10 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo) { - amdgpu_vm_assert_locked(vm_bo->vm); vm_bo->moved = true; + spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); + spin_unlock(&vm_bo->vm->status_lock); } /** @@ -270,10 +235,13 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo) static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) { amdgpu_vm_assert_locked(vm_bo->vm); - if (vm_bo->bo->parent) + if (vm_bo->bo->parent) { + spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); - else + 
spin_unlock(&vm->status_lock); + } else { amdgpu_vm_bo_idle(vm_bo); + } } /** @@ -287,7 +255,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) { amdgpu_vm_assert_locked(vm_bo->vm); + spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->done); + spin_unlock(&vm_bo->vm->status_lock); } /** @@ -301,13 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) { struct amdgpu_vm_bo_base *vm_bo, *tmp; - spin_lock(&vm->invalidated_lock); + amdgpu_vm_assert_locked(vm); + + spin_lock(&vm->status_lock); list_splice_init(&vm->done, &vm->invalidated); list_for_each_entry(vm_bo, &vm->invalidated, vm_status) vm_bo->moved = true; - spin_unlock(&vm->invalidated_lock); - amdgpu_vm_assert_locked(vm_bo->vm); list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { struct amdgpu_bo *bo = vm_bo->bo; @@ -317,13 +287,14 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) else if (bo->parent) list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); } + spin_unlock(&vm->status_lock); } /** * amdgpu_vm_update_shared - helper to update shared memory stat * @base: base structure for tracking BO usage in a VM * - * Takes the vm stats_lock and updates the shared memory stat. If the basic + * Takes the vm status_lock and updates the shared memory stat. If the basic * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called * as well. */ @@ -336,7 +307,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base) bool shared; dma_resv_assert_held(bo->tbo.base.resv); - spin_lock(&vm->stats_lock); + spin_lock(&vm->status_lock); shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); if (base->shared != shared) { base->shared = shared; @@ -348,7 +319,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base) vm->stats[bo_memtype].drm.private += size; } } - spin_unlock(&vm->stats_lock); + spin_unlock(&vm->status_lock); } /** @@ -373,11 +344,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo) * be bo->tbo.resource * @sign: if we should add (+1) or subtract (-1) from the stat * - * Caller need to have the vm stats_lock held. Useful for when multiple update + * Caller needs to have the vm status_lock held. Useful for when multiple updates * need to happen at the same time.
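 *
 * A rough sketch of the intended call pattern (the caller here is
 * hypothetical, but amdgpu_vm_bo_move() below uses exactly this
 * -1/+1 sequence under the same lock):
 *
 *	spin_lock(&vm->status_lock);
 *	amdgpu_vm_update_stats_locked(base, old_res, -1);
 *	amdgpu_vm_update_stats_locked(base, new_res, +1);
 *	spin_unlock(&vm->status_lock);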
*/ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, - struct ttm_resource *res, int sign) + struct ttm_resource *res, int sign) { struct amdgpu_vm *vm = base->vm; struct amdgpu_bo *bo = base->bo; @@ -401,8 +372,7 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, */ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) vm->stats[res_memtype].drm.purgeable += size; - if (!(bo->preferred_domains & - amdgpu_mem_type_to_domain(res_memtype))) + if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype))) vm->stats[bo_memtype].evicted += size; } } @@ -421,9 +391,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base, { struct amdgpu_vm *vm = base->vm; - spin_lock(&vm->stats_lock); + spin_lock(&vm->status_lock); amdgpu_vm_update_stats_locked(base, res, sign); - spin_unlock(&vm->stats_lock); + spin_unlock(&vm->status_lock); } /** @@ -449,10 +419,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, base->next = bo->vm_bo; bo->vm_bo = base; - spin_lock(&vm->stats_lock); + spin_lock(&vm->status_lock); base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1); - spin_unlock(&vm->stats_lock); + spin_unlock(&vm->status_lock); if (!amdgpu_vm_is_bo_always_valid(vm, bo)) return; @@ -511,10 +481,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, int ret; /* We can only trust prev->next while holding the lock */ - spin_lock(&vm->invalidated_lock); + spin_lock(&vm->status_lock); while (!list_is_head(prev->next, &vm->done)) { bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status); - spin_unlock(&vm->invalidated_lock); + spin_unlock(&vm->status_lock); bo = bo_va->base.bo; if (bo) { @@ -522,10 +492,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, if (unlikely(ret)) return ret; } - spin_lock(&vm->invalidated_lock); + spin_lock(&vm->status_lock); prev = prev->next; } - spin_unlock(&vm->invalidated_lock); + spin_unlock(&vm->status_lock); return 0; } @@ -621,7 +591,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); - struct amdgpu_vm_bo_base *bo_base, *tmp; + struct amdgpu_vm_bo_base *bo_base; struct amdgpu_bo *bo; int r; @@ -634,7 +604,13 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { + spin_lock(&vm->status_lock); + while (!list_empty(&vm->evicted)) { + bo_base = list_first_entry(&vm->evicted, + struct amdgpu_vm_bo_base, + vm_status); + spin_unlock(&vm->status_lock); + bo = bo_base->bo; r = validate(param, bo); @@ -647,21 +623,26 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); amdgpu_vm_bo_relocated(bo_base); } + spin_lock(&vm->status_lock); } + while (ticket && !list_empty(&vm->evicted_user)) { + bo_base = list_first_entry(&vm->evicted_user, + struct amdgpu_vm_bo_base, + vm_status); + spin_unlock(&vm->status_lock); + + bo = bo_base->bo; + dma_resv_assert_held(bo->tbo.base.resv); - if (ticket) { - list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user, - vm_status) { - bo = bo_base->bo; - dma_resv_assert_held(bo->tbo.base.resv); + r = validate(param, bo); + if (r) + return r; - r = validate(param, bo); - if (r) - return r; + amdgpu_vm_bo_invalidated(bo_base); - amdgpu_vm_bo_invalidated(bo_base); - } + 
spin_lock(&vm->status_lock); } + spin_unlock(&vm->status_lock); amdgpu_vm_eviction_lock(vm); vm->evicting = false; @@ -690,7 +671,9 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) ret = !vm->evicting; amdgpu_vm_eviction_unlock(vm); + spin_lock(&vm->status_lock); ret &= list_empty(&vm->evicted); + spin_unlock(&vm->status_lock); spin_lock(&vm->immediate.lock); ret &= !vm->immediate.stopped; @@ -981,13 +964,18 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate) { struct amdgpu_vm_update_params params; - struct amdgpu_vm_bo_base *entry, *tmp; + struct amdgpu_vm_bo_base *entry; bool flush_tlb_needed = false; + LIST_HEAD(relocated); int r, idx; amdgpu_vm_assert_locked(vm); - if (list_empty(&vm->relocated)) + spin_lock(&vm->status_lock); + list_splice_init(&vm->relocated, &relocated); + spin_unlock(&vm->status_lock); + + if (list_empty(&relocated)) return 0; if (!drm_dev_enter(adev_to_drm(adev), &idx)) @@ -1003,7 +991,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, if (r) goto error; - list_for_each_entry(entry, &vm->relocated, vm_status) { + list_for_each_entry(entry, &relocated, vm_status) { /* vm_flush_needed after updating moved PDEs */ flush_tlb_needed |= entry->moved; @@ -1019,7 +1007,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, if (flush_tlb_needed) atomic64_inc(&vm->tlb_seq); - list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) { + while (!list_empty(&relocated)) { + entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base, + vm_status); amdgpu_vm_bo_idle(entry); } @@ -1246,9 +1236,9 @@ error_free: void amdgpu_vm_get_memory(struct amdgpu_vm *vm, struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]) { - spin_lock(&vm->stats_lock); + spin_lock(&vm->status_lock); memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); - spin_unlock(&vm->stats_lock); + spin_unlock(&vm->status_lock); } /** @@ -1615,24 +1605,29 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) { - struct amdgpu_bo_va *bo_va, *tmp; + struct amdgpu_bo_va *bo_va; struct dma_resv *resv; bool clear, unlock; int r; - list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { + spin_lock(&vm->status_lock); + while (!list_empty(&vm->moved)) { + bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, + base.vm_status); + spin_unlock(&vm->status_lock); + /* Per VM BOs never need to be cleared in the page tables */ r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; + spin_lock(&vm->status_lock); } - spin_lock(&vm->invalidated_lock); while (!list_empty(&vm->invalidated)) { bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, base.vm_status); resv = bo_va->base.bo->tbo.base.resv; - spin_unlock(&vm->invalidated_lock); + spin_unlock(&vm->status_lock); /* Try to reserve the BO to avoid clearing its ptes */ if (!adev->debug_vm && dma_resv_trylock(resv)) { @@ -1664,9 +1659,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) amdgpu_vm_bo_evicted_user(&bo_va->base); - spin_lock(&vm->invalidated_lock); + spin_lock(&vm->status_lock); } - spin_unlock(&vm->invalidated_lock); + spin_unlock(&vm->status_lock); return 0; } @@ -2195,9 +2190,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev, } } - spin_lock(&vm->invalidated_lock); + spin_lock(&vm->status_lock); list_del(&bo_va->base.vm_status); - spin_unlock(&vm->invalidated_lock); + spin_unlock(&vm->status_lock); list_for_each_entry_safe(mapping, next,
&bo_va->valids, list) { list_del(&mapping->list); @@ -2305,10 +2300,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem, for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - spin_lock(&vm->stats_lock); + spin_lock(&vm->status_lock); amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1); amdgpu_vm_update_stats_locked(bo_base, new_mem, +1); - spin_unlock(&vm->stats_lock); + spin_unlock(&vm->status_lock); } amdgpu_vm_bo_invalidate(bo, evicted); @@ -2554,6 +2549,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * @adev: amdgpu_device pointer * @vm: requested vm * @xcp_id: GPU partition selection id + * @pasid: the pasid the VM is using on this GPU * * Init @vm fields. * @@ -2561,7 +2557,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * 0 for success, error for failure. */ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int32_t xcp_id) + int32_t xcp_id, uint32_t pasid) { struct amdgpu_bo *root_bo; struct amdgpu_bo_vm *root; @@ -2575,12 +2571,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&vm->relocated); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->idle); - spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->invalidated); + spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->freed); INIT_LIST_HEAD(&vm->done); INIT_KFIFO(vm->faults); - spin_lock_init(&vm->stats_lock); r = amdgpu_vm_init_entities(adev, vm); if (r) @@ -2638,12 +2633,26 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) dev_dbg(adev->dev, "Failed to create task info for VM\n"); + /* Store new PASID in XArray (if non-zero) */ + if (pasid != 0) { + r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL)); + if (r < 0) + goto error_free_root; + + vm->pasid = pasid; + } + amdgpu_bo_unreserve(vm->root.bo); amdgpu_bo_unref(&root_bo); return 0; error_free_root: + /* If PASID was partially set, erase it from XArray before failing */ + if (vm->pasid != 0) { + xa_erase_irq(&adev->vm_manager.pasids, vm->pasid); + vm->pasid = 0; + } amdgpu_vm_pt_free_root(adev, vm); amdgpu_bo_unreserve(vm->root.bo); amdgpu_bo_unref(&root_bo); @@ -2749,7 +2758,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) root = amdgpu_bo_ref(vm->root.bo); amdgpu_bo_reserve(root, true); - amdgpu_vm_set_pasid(adev, vm, 0); + /* Remove PASID mapping before destroying VM */ + if (vm->pasid != 0) { + xa_erase_irq(&adev->vm_manager.pasids, vm->pasid); + vm->pasid = 0; + } dma_fence_wait(vm->last_unlocked, false); dma_fence_put(vm->last_unlocked); dma_fence_wait(vm->last_tlb_flush, false); @@ -3038,6 +3051,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) amdgpu_vm_assert_locked(vm); + spin_lock(&vm->status_lock); seq_puts(m, "\tIdle BOs:\n"); list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { if (!bo_va->base.bo) @@ -3075,13 +3089,11 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) id = 0; seq_puts(m, "\tInvalidated BOs:\n"); - spin_lock(&vm->invalidated_lock); list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { if (!bo_va->base.bo) continue; total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); } - spin_unlock(&vm->invalidated_lock); total_invalidated_objs = id; id = 0; @@ -3091,6 +3103,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) continue; total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); } 
+ spin_unlock(&vm->status_lock); total_done_objs = id; seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index adc5c9161fa8..cf0ec94e8a07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -203,11 +203,11 @@ struct amdgpu_vm_bo_base { /* protected by bo being reserved */ struct amdgpu_vm_bo_base *next; - /* protected by vm reservation and invalidated_lock */ + /* protected by vm status_lock */ struct list_head vm_status; /* if the bo is counted as shared in mem stats - * protected by vm BO being reserved */ + * protected by vm status_lock */ bool shared; /* protected by the BO being reserved */ @@ -343,8 +343,10 @@ struct amdgpu_vm { bool evicting; unsigned int saved_flags; - /* Memory statistics for this vm, protected by stats_lock */ - spinlock_t stats_lock; + /* Lock to protect vm_bo add/del/move on all lists of vm */ + spinlock_t status_lock; + + /* Memory statistics for this vm, protected by status_lock */ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]; /* @@ -352,8 +354,6 @@ struct amdgpu_vm { * PDs, PTs or per VM BOs. The state transits are: * * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle - * - * Lists are protected by the root PD dma_resv lock. */ /* Per-VM and PT BOs who needs a validation */ @@ -374,10 +374,7 @@ struct amdgpu_vm { * state transits are: * * evicted_user or invalidated -> done - * - * Lists are protected by the invalidated_lock. */ - spinlock_t invalidated_lock; /* BOs for user mode queues that need a validation */ struct list_head evicted_user; @@ -503,11 +500,8 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs; void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); -int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, - u32 pasid); - long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout); -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid); int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 7a4c12ff9b18..f794fb1cc06e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -543,7 +543,9 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry) entry->bo->vm_bo = NULL; ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); + spin_lock(&entry->vm->status_lock); list_del(&entry->vm_status); + spin_unlock(&entry->vm->status_lock); amdgpu_bo_unref(&entry->bo); } @@ -587,6 +589,7 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, struct amdgpu_vm_pt_cursor seek; struct amdgpu_vm_bo_base *entry; + spin_lock(¶ms->vm->status_lock); for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) { if (entry && entry->bo) list_move(&entry->vm_status, ¶ms->tlb_flush_waitlist); @@ -594,6 +597,7 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, /* enter start node now */ list_move(&cursor->entry->vm_status, ¶ms->tlb_flush_waitlist); + spin_unlock(¶ms->vm->status_lock); } /** diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a5adb2ed9b3c..9d934c07fa6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj, !adev->gmc.vram_vendor) return 0; + if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) + return 0; + return attr->mode; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 66c47c466532..d61eb9f187c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5862,8 +5862,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; - BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); control |= ib->length_dw | (vmid << 24); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 710ec9c34e43..93fde0f9af87 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -4419,8 +4419,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; - BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); control |= ib->length_dw | (vmid << 24); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index 404cc8c2ff2c..f4a19357ccbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -337,7 +337,7 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, int vmid, i; if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready && - (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) { + (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) { struct mes_inv_tlbs_pasid_input input = {0}; input.pasid = pasid; input.flush_type = flush_type; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 93d7ccb7d013..0e5e54d0a9a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1068,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block) GFP_KERNEL); if (!adev->gmc.vm_fault_info) return -ENOMEM; - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); return 0; } @@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) - && !atomic_read(&adev->gmc.vm_fault_info_updated)) { + && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) { struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, info->prot_read = protections & 0x8 ? true : false; info->prot_write = protections & 0x10 ? true : false; info->prot_exec = protections & 0x20 ? 
true : false; - mb(); - atomic_set(&adev->gmc.vm_fault_info_updated, 1); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c5e2a2c41e06..e1509480dfc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1183,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block) GFP_KERNEL); if (!adev->gmc.vm_fault_info) return -ENOMEM; - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); return 0; } @@ -1478,7 +1478,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) - && !atomic_read(&adev->gmc.vm_fault_info_updated)) { + && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) { struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1494,8 +1494,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, info->prot_read = protections & 0x8 ? true : false; info->prot_write = protections & 0x10 ? true : false; info->prot_exec = protections & 0x20 ? true : false; - mb(); - atomic_set(&adev->gmc.vm_fault_info_updated, 1); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 2db9b2c63693..1cd9eaeef38f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -208,10 +208,10 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev, struct amdgpu_userq_mgr *uqm, *tmp; unsigned int hung_db_num = 0; int queue_id, r, i; - u32 db_array[4]; + u32 db_array[8]; - if (db_array_size > 4) { - dev_err(adev->dev, "DB array size (%d vs 4) too small\n", + if (db_array_size > 8) { + dev_err(adev->dev, "DB array size (%d vs 8) too small\n", db_array_size); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index e82188431f79..da575bb1377f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -66,7 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev); #define GFX_MES_DRAM_SIZE 0x80000 #define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE) -#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */ +#define MES11_HUNG_HQD_INFO_OFFSET 4 static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring) { @@ -1720,8 +1721,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int pipe, r; - adev->mes.hung_queue_db_array_size = - MES11_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET; + for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index aff06f06aeee..7f3512d9de07 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -47,7 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev); #define 
MES_EOP_SIZE 2048 -#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset [4:7] hqd info */ +#define MES12_HUNG_HQD_INFO_OFFSET 4 static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring) { @@ -228,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, pipe, x_pkt->header.opcode); r = amdgpu_fence_wait_polling(ring, seq, timeout); - if (r < 1 || !*status_ptr) { + + /* + * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success). + * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information. + */ + if (r < 1 || !(lower_32_bits(*status_ptr))) { if (misc_op_str) dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n", @@ -1899,8 +1905,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int pipe, r; - adev->mes.hung_queue_db_array_size = - MES12_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET; + for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { r = amdgpu_mes_init_microcode(adev, pipe); if (r) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6c5c7c1bf5ed..6e7bc983fc0b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, pr_debug_ratelimited("Evicting process pid %d queues\n", pdd->process->lead_thread->pid); + if (dqm->dev->kfd->shared_resources.enable_mes) { + pdd->last_evict_timestamp = get_jiffies_64(); + retval = suspend_all_queues_mes(dqm); + if (retval) { + dev_err(dev, "Suspending all queues failed"); + goto out; + } + } + /* Mark all queues as evicted. Deactivate all active queues on * the qpd. */ @@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, decrement_queue_count(dqm, qpd, q); if (dqm->dev->kfd->shared_resources.enable_mes) { - int err; - - err = remove_queue_mes(dqm, q, qpd); - if (err) { + retval = remove_queue_mes(dqm, q, qpd); + if (retval) { dev_err(dev, "Failed to evict queue %d\n", q->properties.queue_id); - retval = err; + goto out; } } } - pdd->last_evict_timestamp = get_jiffies_64(); - if (!dqm->dev->kfd->shared_resources.enable_mes) + + if (!dqm->dev->kfd->shared_resources.enable_mes) { + pdd->last_evict_timestamp = get_jiffies_64(); retval = execute_queues_cpsch(dqm, qpd->is_debug ? KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); + } else { + retval = resume_all_queues_mes(dqm); + if (retval) + dev_err(dev, "Resuming all queues failed"); + } out: dqm_unlock(dqm); @@ -3098,61 +3111,17 @@ out: return ret; } -static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) -{ - struct device *dev = dqm->dev->adev->dev; - int ret = 0; - - /* Check if process is already evicted */ - dqm_lock(dqm); - if (qpd->evicted) { - /* Increment the evicted count to make sure the - * process stays evicted before its terminated. 
- */ - qpd->evicted++; - dqm_unlock(dqm); - goto out; - } - dqm_unlock(dqm); - - ret = suspend_all_queues_mes(dqm); - if (ret) { - dev_err(dev, "Suspending all queues failed"); - goto out; - } - - ret = dqm->ops.evict_process_queues(dqm, qpd); - if (ret) { - dev_err(dev, "Evicting process queues failed"); - goto out; - } - - ret = resume_all_queues_mes(dqm); - if (ret) - dev_err(dev, "Resuming all queues failed"); - -out: - return ret; -} - int kfd_evict_process_device(struct kfd_process_device *pdd) { struct device_queue_manager *dqm; struct kfd_process *p; - int ret = 0; p = pdd->process; dqm = pdd->dev->dqm; WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); - if (dqm->dev->kfd->shared_resources.enable_mes) - ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd); - else - ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); - - return ret; + return dqm->ops.evict_process_queues(dqm, &pdd->qpd); } int reserve_debug_trap_vmid(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 273f42e3afdd..9d72411c3379 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3045,6 +3045,8 @@ retry_write_locked: if (svms->checkpoint_ts[gpuidx] != 0) { if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) { pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + if (write_locked) + mmap_write_downgrade(mm); r = -EAGAIN; goto out_unlock_svms; } else { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8e1622bf7a42..6597475e245d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2000,6 +2000,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.disable_ips_in_vpb = 0; + /* DCN35 and above supports dynamic DTBCLK switch */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) + init_data.flags.allow_0_dtb_clk = true; + /* Enable DWB for tested platforms only */ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) init_data.num_virtual_links = 1; @@ -2081,8 +2085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); - adev->dm.restore_backlight = true; - adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); if (!adev->dm.hpd_rx_offload_wq) { drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); @@ -3438,7 +3440,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); dc_resume(dm->dc); - adev->dm.restore_backlight = true; amdgpu_dm_irq_resume_early(adev); @@ -9965,6 +9966,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -10084,6 +10086,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -10140,16 +10143,13 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, * to fix a flicker issue. 
* It will cause dm->actual_brightness to no longer match the current panel brightness * level. (dm->brightness is the correct panel level) - * So we set the backlight level with dm->brightness value after initial - * set mode. Use restore_backlight flag to avoid setting backlight level - * for every subsequent mode set. + * So we set the backlight level with the dm->brightness value after a mode set */ - if (dm->restore_backlight) { + if (set_backlight_level) { for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i]) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } - dm->restore_backlight = false; } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 009f206226f0..db75e991ac7b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -631,13 +631,6 @@ struct amdgpu_display_manager { u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; /** - * @restore_backlight: - * - * Flag to indicate whether to restore backlight after modeset. - */ - bool restore_backlight; - - /** * @aux_hpd_discon_quirk: * * quirk for hpd discon while aux is on-going. diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 2b1673d69ea8..1ab5ae9b5ea5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration( REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0); if (data->taps.h_taps + data->taps.v_taps <= 2) { - /* Set bypass */ - - /* DCE6 has no SCL_MODE register, skip scale mode programming */ + /* Disable scaler functionality */ + REG_WRITE(SCL_SCALER_ENABLE, 0); + /* Clear registers that can cause glitches even when the scaler is off */ + REG_WRITE(SCL_TAP_CONTROL, 0); + REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); + REG_WRITE(SCL_F_SHARP_CONTROL, 0); return false; } @@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration( SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1, SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1); - /* DCE6 has no SCL_MODE register, skip scale mode programming */ + REG_WRITE(SCL_SCALER_ENABLE, 1); /* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */ @@ -502,6 +505,8 @@ static void dce60_transform_set_scaler( REG_SET(DC_LB_MEM_SIZE, 0, DC_LB_MEM_SIZE, xfm_dce->lb_memory_size); + REG_WRITE(SCL_UPDATE, 0x00010000); + /* Clear SCL_F_SHARP_CONTROL value to 0 */ REG_WRITE(SCL_F_SHARP_CONTROL, 0); @@ -527,8 +532,7 @@ static void dce60_transform_set_scaler( if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) { /* 4. Program vertical filters */ if (xfm_dce->filter_v == NULL) - REG_SET(SCL_VERT_FILTER_CONTROL, 0, - SCL_V_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_VERT_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.v_taps, coeffs_v, FILTER_TYPE_RGB_Y_VERTICAL); @@ -542,8 +546,7 @@ static void dce60_transform_set_scaler( /* 5.
Program horizontal filters */ if (xfm_dce->filter_h == NULL) - REG_SET(SCL_HORZ_FILTER_CONTROL, 0, - SCL_H_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.h_taps, @@ -566,6 +569,8 @@ static void dce60_transform_set_scaler( /* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */ /* DCE6 DATA_FORMAT register does not support ALPHA_EN */ + + REG_WRITE(SCL_UPDATE, 0); } #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h index cbce194ec7b8..eb716e8337e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h @@ -155,6 +155,9 @@ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \ SRI(VIEWPORT_START, SCL, id), \ SRI(VIEWPORT_SIZE, SCL, id), \ + SRI(SCL_SCALER_ENABLE, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_INIT, SCL, id), \ @@ -590,6 +593,7 @@ struct dce_transform_registers { uint32_t SCL_VERT_FILTER_SCALE_RATIO; uint32_t SCL_HORZ_FILTER_INIT; #if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t SCL_SCALER_ENABLE; uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA; uint32_t SCL_HORZ_FILTER_INIT_CHROMA; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 17a21bcbde17..1a28061bb9ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -808,6 +808,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc) { + dc_assert_fp_enabled(); + return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0); } @@ -815,6 +817,8 @@ int dcn_get_approx_det_segs_required_for_pstate( struct _vcs_dpi_soc_bounding_box_st *soc, int pix_clk_100hz, int bpp, int seg_size_kb) { + dc_assert_fp_enabled(); + /* Roughly calculate required crb to hide latency. 
In practice there is slightly * more buffer available for latency hiding */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index c9dd920744c9..817a370e80a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -445,6 +445,8 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; + dc_assert_fp_enabled(); + dcn31_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); @@ -498,9 +500,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; - DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - DC_FP_END(); pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; pipes[pipe_cnt].pipe.src.dcc_rate = 3; @@ -581,6 +581,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) unsigned int i, plane_count = 0; DC_LOGGER_INIT(dc->ctx->logger); + dc_assert_fp_enabled(); + for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].plane_state) plane_count++; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 8cda18ce1a76..77023b619f1e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -478,6 +478,8 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; + dc_assert_fp_enabled(); + dcn31_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); @@ -531,9 +533,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; - DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - DC_FP_END(); pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; pipes[pipe_cnt].pipe.src.dcc_rate = 3; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index 53c67ebe779f..b75be6ad64f6 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -404,13 +404,13 @@ static const struct dc_plane_cap plane_cap = { }, .max_upscale_factor = { - .argb8888 = 16000, + .argb8888 = 1, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { - .argb8888 = 250, + .argb8888 = 1, .nv12 = 1, .fp16 = 1 } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 07552445e424..fff57f23f4f7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1760,6 +1760,20 @@ enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_stat } +static int populate_dml_pipes_from_context_fpu(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + enum dc_validate_mode validate_mode) +{ + int ret; + + DC_FP_START(); + ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); + DC_FP_END(); + + return ret; +} + static struct resource_funcs dcn35_res_pool_funcs = { .destroy = dcn35_destroy_resource_pool, .link_enc_create = dcn35_link_encoder_create, @@ -1770,7 +1784,7 
@@ static struct resource_funcs dcn35_res_pool_funcs = { .validate_bandwidth = dcn35_validate_bandwidth, .calculate_wm_and_dlg = NULL, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu, + .populate_dml_pipes = populate_dml_pipes_from_context_fpu, .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .release_pipe = dcn20_release_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index cb0478a9a34d..0abd163b425e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1732,6 +1732,21 @@ static enum dc_status dcn351_validate_bandwidth(struct dc *dc, return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } +static int populate_dml_pipes_from_context_fpu(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + enum dc_validate_mode validate_mode) +{ + int ret; + + DC_FP_START(); + ret = dcn351_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); + DC_FP_END(); + + return ret; + +} + static struct resource_funcs dcn351_res_pool_funcs = { .destroy = dcn351_destroy_resource_pool, .link_enc_create = dcn35_link_encoder_create, @@ -1742,7 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = { .validate_bandwidth = dcn351_validate_bandwidth, .calculate_wm_and_dlg = NULL, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu, + .populate_dml_pipes = populate_dml_pipes_from_context_fpu, .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .release_pipe = dcn20_release_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index 126090c9bb8a..ca125ee6c2fb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -1734,6 +1734,20 @@ static enum dc_status dcn35_validate_bandwidth(struct dc *dc, } +static int populate_dml_pipes_from_context_fpu(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + enum dc_validate_mode validate_mode) +{ + int ret; + + DC_FP_START(); + ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); + DC_FP_END(); + + return ret; +} + static struct resource_funcs dcn36_res_pool_funcs = { .destroy = dcn36_destroy_resource_pool, .link_enc_create = dcn35_link_encoder_create, @@ -1744,7 +1758,7 @@ static struct resource_funcs dcn36_res_pool_funcs = { .validate_bandwidth = dcn35_validate_bandwidth, .calculate_wm_and_dlg = NULL, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu, + .populate_dml_pipes = populate_dml_pipes_from_context_fpu, .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .release_pipe = dcn20_release_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index 55b929ca7982..b1fb0f8a253a 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -641,16 +641,16 @@ static void 
spl_calculate_inits_and_viewports(struct spl_in *spl_in, /* this gives the direction of the cositing (negative will move * left, right otherwise) */ - int sign = 1; + int h_sign = flip_horz_scan_dir ? -1 : 1; + int v_sign = flip_vert_scan_dir ? -1 : 1; switch (spl_in->basic_in.cositing) { - case CHROMA_COSITING_TOPLEFT: - init_adj_h = spl_fixpt_from_fraction(sign, 4); - init_adj_v = spl_fixpt_from_fraction(sign, 4); + init_adj_h = spl_fixpt_from_fraction(h_sign, 4); + init_adj_v = spl_fixpt_from_fraction(v_sign, 4); break; case CHROMA_COSITING_LEFT: - init_adj_h = spl_fixpt_from_fraction(sign, 4); + init_adj_h = spl_fixpt_from_fraction(h_sign, 4); init_adj_v = spl_fixpt_zero; break; case CHROMA_COSITING_NONE: diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h index 9de01ae574c0..067eddd9c62d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h @@ -4115,6 +4115,7 @@ #define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55 #define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40 #define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL0_SCL_SCALER_ENABLE 0x1B42 #define mmSCL0_SCL_CONTROL 0x1B44 #define mmSCL0_SCL_DEBUG 0x1B6A #define mmSCL0_SCL_DEBUG2 0x1B69 @@ -4144,6 +4145,7 @@ #define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55 #define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40 #define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41 +#define mmSCL1_SCL_SCALER_ENABLE 0x1E42 #define mmSCL1_SCL_CONTROL 0x1E44 #define mmSCL1_SCL_DEBUG 0x1E6A #define mmSCL1_SCL_DEBUG2 0x1E69 @@ -4173,6 +4175,7 @@ #define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155 #define mmSCL2_SCL_COEF_RAM_SELECT 0x4140 #define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141 +#define mmSCL2_SCL_SCALER_ENABLE 0x4142 #define mmSCL2_SCL_CONTROL 0x4144 #define mmSCL2_SCL_DEBUG 0x416A #define mmSCL2_SCL_DEBUG2 0x4169 @@ -4202,6 +4205,7 @@ #define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455 #define mmSCL3_SCL_COEF_RAM_SELECT 0x4440 #define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441 +#define mmSCL3_SCL_SCALER_ENABLE 0x4442 #define mmSCL3_SCL_CONTROL 0x4444 #define mmSCL3_SCL_DEBUG 0x446A #define mmSCL3_SCL_DEBUG2 0x4469 @@ -4231,6 +4235,7 @@ #define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755 #define mmSCL4_SCL_COEF_RAM_SELECT 0x4740 #define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741 +#define mmSCL4_SCL_SCALER_ENABLE 0x4742 #define mmSCL4_SCL_CONTROL 0x4744 #define mmSCL4_SCL_DEBUG 0x476A #define mmSCL4_SCL_DEBUG2 0x4769 @@ -4260,6 +4265,7 @@ #define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55 #define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40 #define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41 +#define mmSCL5_SCL_SCALER_ENABLE 0x4A42 #define mmSCL5_SCL_CONTROL 0x4A44 #define mmSCL5_SCL_DEBUG 0x4A6A #define mmSCL5_SCL_DEBUG2 0x4A69 @@ -4287,6 +4293,7 @@ #define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55 #define mmSCL_COEF_RAM_SELECT 0x1B40 #define mmSCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL_SCALER_ENABLE 0x1B42 #define mmSCL_CONTROL 0x1B44 #define mmSCL_DEBUG 0x1B6A #define mmSCL_DEBUG2 0x1B69 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h index 2d6a598a6c25..9317a7afa621 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h @@ -8650,6 +8650,8 @@ #define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000 #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L #define 
REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000 +#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L +#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000 #define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L #define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000 #define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index cf9932e68055..3a9522c17fee 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3500,6 +3500,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, * for these GPUs to calculate bandwidth requirements. */ if (high_pixelclock_count) { + /* Work around flickering lines at the bottom edge + * of the screen when using a single 4K 60Hz monitor. + */ + disable_mclk_switching = true; + /* On Oland, we observe some flickering when two 4K 60Hz * displays are connected, possibly because voltage is too low. * Raise the voltage by requiring a higher SCLK. diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 8da882c51856..9b28c0728269 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; else if (hwmgr->pp_table_version == PP_TABLE_V0) - thermal_data->max = data->thermal_temp_setting.temperature_shutdown * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + thermal_data->max = data->thermal_temp_setting.temperature_shutdown; thermal_data->sw_ctf_threshold = thermal_data->max; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 1a1f2a6b2e52..a89075e25717 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -288,7 +288,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) * Considering above, we just leave user a verbal message instead * of halt driver loading. */ - if (if_version != smu->smc_driver_if_version) { + if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION && + if_version != smu->smc_driver_if_version) { dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", smu->smc_driver_if_version, if_version, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index cbe5b06438c1..285cf7979693 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -450,8 +450,7 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) ((pgm == 4) && (fw_ver >= 0x4557000))) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); - if (((pgm == 0) && (fw_ver >= 0x00558200)) || - ((pgm == 4) && (fw_ver >= 0x04557100))) + if ((pgm == 0) && (fw_ver >= 0x00558200)) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); } @@ -3933,7 +3932,7 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ? 
smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map; smu->table_map = smu_v13_0_6_table_map; - smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; + smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION; smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; smu_v13_0_set_smu_mailbox_registers(smu); smu_v13_0_6_set_temp_funcs(smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index d588f74b98de..0ae91c8b6d72 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -40,6 +40,8 @@ #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 +#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF + #define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ do { \ typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \ diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index b4e8edc7c767..30b011ed0a05 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -836,22 +836,24 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct ast_device *ast = to_ast_device(crtc->dev); + u8 vgacr17 = 0x00; + u8 vgacrb6 = 0xff; - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, 0x00); - ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, 0x00); + vgacr17 |= AST_IO_VGACR17_SYNC_ENABLE; + vgacrb6 &= ~(AST_IO_VGACRB6_VSYNC_OFF | AST_IO_VGACRB6_HSYNC_OFF); + + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17); + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6); } static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct ast_device *ast = to_ast_device(crtc->dev); - u8 vgacrb6; + u8 vgacr17 = 0xff; - ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, AST_IO_VGASR1_SD); - - vgacrb6 = AST_IO_VGACRB6_VSYNC_OFF | - AST_IO_VGACRB6_HSYNC_OFF; - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6); + vgacr17 &= ~AST_IO_VGACR17_SYNC_ENABLE; + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17); /* * HW cursors require the underlying primary plane and CRTC to diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index e15adaf3a80e..30578e3b07e4 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -29,6 +29,7 @@ #define AST_IO_VGAGRI (0x4E) #define AST_IO_VGACRI (0x54) +#define AST_IO_VGACR17_SYNC_ENABLE BIT(7) /* called "Hardware reset" in docs */ #define AST_IO_VGACR80_PASSWORD (0xa8) #define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0) #define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index 399fa7eebd49..03fc8fd10f20 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -121,8 +121,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx) } /* Test for known Chip ID. 
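 * Only the first two ID bytes are compared from here on; the third byte
 * appears to differ between chip revisions, so it is left out of the
 * match and only included in the error printout below.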
*/ - if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE || - chipid[2] != REG_CHIPID2_VALUE) { + if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) { dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n", chipid[0], chipid[1], chipid[2]); return -EINVAL; diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c index 9dc0408fbbea..5b956229c82f 100644 --- a/drivers/gpu/drm/drm_draw.c +++ b/drivers/gpu/drm/drm_draw.c @@ -127,7 +127,7 @@ EXPORT_SYMBOL(drm_draw_fill16); void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, - u16 color) + u32 color) { unsigned int y, x; diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h index f121ee7339dc..20cb404e23ea 100644 --- a/drivers/gpu/drm/drm_draw_internal.h +++ b/drivers/gpu/drm/drm_draw_internal.h @@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch, void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, - u16 color); + u32 color); void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index eeeeb99cfdf6..cb906765897e 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -361,7 +361,6 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = { * @name: Name of the GPU SVM. * @drm: Pointer to the DRM device structure. * @mm: Pointer to the mm_struct for the address space. - * @device_private_page_owner: Device private pages owner. * @mm_start: Start address of GPU SVM. * @mm_range: Range of the GPU SVM. * @notifier_size: Size of individual notifiers. @@ -383,7 +382,7 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = { */ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, const char *name, struct drm_device *drm, - struct mm_struct *mm, void *device_private_page_owner, + struct mm_struct *mm, unsigned long mm_start, unsigned long mm_range, unsigned long notifier_size, const struct drm_gpusvm_ops *ops, @@ -395,15 +394,13 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, mmgrab(mm); } else { /* No full SVM mode, only core drm_gpusvm_pages API. */ - if (ops || num_chunks || mm_range || notifier_size || - device_private_page_owner) + if (ops || num_chunks || mm_range || notifier_size) return -EINVAL; } gpusvm->name = name; gpusvm->drm = drm; gpusvm->mm = mm; - gpusvm->device_private_page_owner = device_private_page_owner; gpusvm->mm_start = mm_start; gpusvm->mm_range = mm_range; gpusvm->notifier_size = notifier_size; @@ -684,6 +681,7 @@ static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn, * @notifier: Pointer to the GPU SVM notifier structure * @start: Start address * @end: End address + * @dev_private_owner: The device private page owner * * Check if pages between start and end have been faulted in on the CPU. Use to * prevent migration of pages without CPU backing store. 
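 *
 * With the owner removed from struct drm_gpusvm, callers now pass it per
 * operation through struct drm_gpusvm_ctx. A minimal sketch of the caller
 * side (only the two ctx fields come from this patch; the surrounding
 * names are hypothetical):
 *
 *	struct drm_gpusvm_ctx ctx = {
 *		.check_pages_threshold = SZ_64K,
 *		.device_private_page_owner = owner,
 *	};
 *	range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
 *						gpuva_start, gpuva_end,
 *						&ctx);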
@@ -692,14 +690,15 @@ static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn, */ static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm, struct drm_gpusvm_notifier *notifier, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, + void *dev_private_owner) { struct hmm_range hmm_range = { .default_flags = 0, .notifier = ¬ifier->notifier, .start = start, .end = end, - .dev_private_owner = gpusvm->device_private_page_owner, + .dev_private_owner = dev_private_owner, }; unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); @@ -753,6 +752,7 @@ err_free: * @gpuva_start: Start address of GPUVA which mirrors CPU * @gpuva_end: End address of GPUVA which mirrors CPU * @check_pages_threshold: Check CPU pages for present threshold + * @dev_private_owner: The device private page owner * * This function determines the chunk size for the GPU SVM range based on the * fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual @@ -767,7 +767,8 @@ drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm, unsigned long fault_addr, unsigned long gpuva_start, unsigned long gpuva_end, - unsigned long check_pages_threshold) + unsigned long check_pages_threshold, + void *dev_private_owner) { unsigned long start, end; int i = 0; @@ -814,7 +815,7 @@ retry: * process-many-malloc' mallocs at least 64k at a time. */ if (end - start <= check_pages_threshold && - !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) { + !drm_gpusvm_check_pages(gpusvm, notifier, start, end, dev_private_owner)) { ++i; goto retry; } @@ -957,7 +958,8 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm, chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas, fault_addr, gpuva_start, gpuva_end, - ctx->check_pages_threshold); + ctx->check_pages_threshold, + ctx->device_private_page_owner); if (chunk_size == LONG_MAX) { err = -EINVAL; goto err_notifier_remove; @@ -1268,7 +1270,7 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm, .notifier = notifier, .start = pages_start, .end = pages_end, - .dev_private_owner = gpusvm->device_private_page_owner, + .dev_private_owner = ctx->device_private_page_owner, }; void *zdd; unsigned long timeout = diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 22a4a1575d22..b817ff44c043 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -2113,10 +2113,10 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) if (intel_fb_uses_dpt(fb)) intel_dpt_destroy(intel_fb->dpt_vm); - intel_frontbuffer_put(intel_fb->frontbuffer); - intel_fb_bo_framebuffer_fini(intel_fb_bo(fb)); + intel_frontbuffer_put(intel_fb->frontbuffer); + kfree(intel_fb); } @@ -2218,15 +2218,17 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, int ret = -EINVAL; int i; + /* + * intel_frontbuffer_get() must be done before + * intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race. 
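The comment above is the first half of a standard kernel unwind story: acquire resources in a fixed order, and on failure release them in exactly the reverse order through a ladder of goto labels, which is what the relabeled error paths in the rest of this hunk implement. A minimal runnable sketch of the shape, with invented resource names standing in for the frontbuffer reference and bo framebuffer init:

#include <stdio.h>

/* Invented stand-ins for the acquisition steps in the hunk below. */
static int take_a(void)  { return 0; }
static int take_b(void)  { return 0; }
static int take_c(void)  { return -1; } /* pretend the last step fails */
static void release_b(void) { puts("release b"); }
static void release_a(void) { puts("release a"); }

static int setup(void)
{
        int err;

        err = take_a();
        if (err)
                return err;     /* nothing to unwind yet */
        err = take_b();
        if (err)
                goto err_release_a;
        err = take_c();
        if (err)
                goto err_release_b;
        return 0;

err_release_b:                  /* reverse order: b, then a */
        release_b();
err_release_a:
        release_a();
        return err;
}

int main(void)
{
        return setup() ? 1 : 0;
}

The rename from a bare err: label to err_bo_framebuffer_fini:/err_frontbuffer_put: in this hunk is what keeps the labels honest once the acquisition order flips.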
+ */ + intel_fb->frontbuffer = intel_frontbuffer_get(obj); + if (!intel_fb->frontbuffer) + return -ENOMEM; + ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd); if (ret) - return ret; - - intel_fb->frontbuffer = intel_frontbuffer_get(obj); - if (!intel_fb->frontbuffer) { - ret = -ENOMEM; - goto err; - } + goto err_frontbuffer_put; ret = -EINVAL; if (!drm_any_plane_has_format(display->drm, @@ -2235,7 +2237,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_dbg_kms(display->drm, "unsupported pixel format %p4cc / modifier 0x%llx\n", &mode_cmd->pixel_format, mode_cmd->modifier[0]); - goto err_frontbuffer_put; + goto err_bo_framebuffer_fini; } max_stride = intel_fb_max_stride(display, mode_cmd->pixel_format, @@ -2246,7 +2248,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? "tiled" : "linear", mode_cmd->pitches[0], max_stride); - goto err_frontbuffer_put; + goto err_bo_framebuffer_fini; } /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ @@ -2254,7 +2256,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_dbg_kms(display->drm, "plane 0 offset (0x%08x) must be 0\n", mode_cmd->offsets[0]); - goto err_frontbuffer_put; + goto err_bo_framebuffer_fini; } drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd); @@ -2264,7 +2266,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, if (mode_cmd->handles[i] != mode_cmd->handles[0]) { drm_dbg_kms(display->drm, "bad plane %d handle\n", i); - goto err_frontbuffer_put; + goto err_bo_framebuffer_fini; } stride_alignment = intel_fb_stride_alignment(fb, i); @@ -2272,7 +2274,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_dbg_kms(display->drm, "plane %d pitch (%d) must be at least %u byte aligned\n", i, fb->pitches[i], stride_alignment); - goto err_frontbuffer_put; + goto err_bo_framebuffer_fini; } if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) { @@ -2282,7 +2284,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_dbg_kms(display->drm, "ccs aux plane %d pitch (%d) must be %d\n", i, fb->pitches[i], ccs_aux_stride); - goto err_frontbuffer_put; + goto err_bo_framebuffer_fini; } } @@ -2291,7 +2293,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, ret = intel_fill_fb_info(display, intel_fb); if (ret) - goto err_frontbuffer_put; + goto err_bo_framebuffer_fini; if (intel_fb_uses_dpt(fb)) { struct i915_address_space *vm; @@ -2317,10 +2319,10 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, err_free_dpt: if (intel_fb_uses_dpt(fb)) intel_dpt_destroy(intel_fb->dpt_vm); +err_bo_framebuffer_fini: + intel_fb_bo_framebuffer_fini(obj); err_frontbuffer_put: intel_frontbuffer_put(intel_fb->frontbuffer); -err: - intel_fb_bo_framebuffer_fini(obj); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 43be5377ddc1..73ed28ac9573 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -270,6 +270,8 @@ static void frontbuffer_release(struct kref *ref) spin_unlock(&display->fb_tracking.lock); i915_active_fini(&front->write); + + drm_gem_object_put(obj); kfree_rcu(front, rcu); } @@ -287,6 +289,8 @@ intel_frontbuffer_get(struct drm_gem_object *obj) if (!front) return NULL; + drm_gem_object_get(obj); + front->obj = obj; kref_init(&front->ref); atomic_set(&front->bits, 0); @@ -299,8 +303,12 @@ intel_frontbuffer_get(struct drm_gem_object 
*obj) spin_lock(&display->fb_tracking.lock); cur = intel_bo_set_frontbuffer(obj, front); spin_unlock(&display->fb_tracking.lock); - if (cur != front) + + if (cur != front) { + drm_gem_object_put(obj); kfree(front); + } + return cur; } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 01bf304c705f..10eb93a34cf2 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -3402,6 +3402,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) struct intel_display *display = to_intel_display(intel_dp); if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { + /* Selective fetch prior LNL */ if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* can we turn CFF off? */ if (intel_dp->psr.busy_frontbuffer_bits == 0) @@ -3420,12 +3421,19 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) intel_psr_configure_full_frame_update(intel_dp); intel_psr_force_update(intel_dp); + } else if (!intel_dp->psr.psr2_sel_fetch_enabled) { + /* + * PSR1 on all platforms + * PSR2 HW tracking + * Panel Replay Full frame update + */ + intel_psr_force_update(intel_dp); } else { + /* Selective update LNL onwards */ intel_psr_exit(intel_dp); } - if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) && - !intel_dp->psr.busy_frontbuffer_bits) + if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits) queue_work(display->wq.unordered, &intel_dp->psr.work); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h index b6dc3d1b9bb1..b682969e3a29 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h @@ -89,12 +89,10 @@ i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj, if (!front) { RCU_INIT_POINTER(obj->frontbuffer, NULL); - drm_gem_object_put(intel_bo_to_drm_bo(obj)); } else if (rcu_access_pointer(obj->frontbuffer)) { cur = rcu_dereference_protected(obj->frontbuffer, true); kref_get(&cur->ref); } else { - drm_gem_object_get(intel_bo_to_drm_bo(obj)); rcu_assign_pointer(obj->frontbuffer, front); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 3e7e5badcc2b..2c651ec024ef 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -1325,9 +1325,16 @@ static int ct_receive(struct intel_guc_ct *ct) static void ct_try_receive_message(struct intel_guc_ct *ct) { + struct intel_guc *guc = ct_to_guc(ct); int ret; - if (GEM_WARN_ON(!ct->enabled)) + if (!ct->enabled) { + GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress); + return; + } + + /* When interrupt disabled, message handling is not expected */ + if (!guc->interrupts.enabled) return; ret = ct_receive(ct); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index b96f0555ca14..f26562eafffc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -929,7 +929,7 @@ done: nvif_vmm_put(vmm, &old_mem->vma[1]); nvif_vmm_put(vmm, &old_mem->vma[0]); } - return 0; + return ret; } static int diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 9bf06e55eaee..df767e82148a 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -1099,6 +1099,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang) } 
panthor_job_irq_suspend(&ptdev->fw->irq); + panthor_fw_stop(ptdev); } /** diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index b50927a824b4..7ec7bea5e38e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1031,7 +1031,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, return format; if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 || - drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) { + drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) { drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n", drm_rect_width(src) >> 16, drm_rect_height(src) >> 16, drm_rect_width(dest), drm_rect_height(dest)); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 46119aacb809..c39f0245e3a9 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -965,13 +965,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, dma_resv_assert_held(resv); dma_resv_for_each_fence(&cursor, resv, usage, fence) { - /* Make sure to grab an additional ref on the added fence */ - dma_fence_get(fence); - ret = drm_sched_job_add_dependency(job, fence); - if (ret) { - dma_fence_put(fence); + /* + * As drm_sched_job_add_dependency always consumes the fence + * reference (even when it fails), and dma_resv_for_each_fence + * is not obtaining one, we need to grab one before calling. + */ + ret = drm_sched_job_add_dependency(job, dma_fence_get(fence)); + if (ret) return ret; - } } return 0; } diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 0317f3d7452a..1884686985b8 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -62,6 +62,8 @@ struct v3d_queue_state { /* Currently active job for this queue */ struct v3d_job *active_job; spinlock_t queue_lock; + /* Protect dma fence for signalling job completion */ + spinlock_t fence_lock; }; /* Performance monitor object. 
The perfmon lifetime is controlled by userspace diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c index 8f8471adae34..c82500a1df73 100644 --- a/drivers/gpu/drm/v3d/v3d_fence.c +++ b/drivers/gpu/drm/v3d/v3d_fence.c @@ -15,7 +15,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q) fence->dev = &v3d->drm; fence->queue = q; fence->seqno = ++queue->emit_seqno; - dma_fence_init(&fence->base, &v3d_fence_ops, &queue->queue_lock, + dma_fence_init(&fence->base, &v3d_fence_ops, &queue->fence_lock, queue->fence_context, fence->seqno); return &fence->base; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index c77d90aa9b82..bb110d35f749 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -273,6 +273,7 @@ v3d_gem_init(struct drm_device *dev) seqcount_init(&queue->stats.lock); spin_lock_init(&queue->queue_lock); + spin_lock_init(&queue->fence_lock); } spin_lock_init(&v3d->mm_lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 819704ac675d..d539f25b5fbe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1497,6 +1497,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo = NULL; + struct vmw_resource *res; struct vmw_surface *srf = NULL; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA); int ret; @@ -1532,18 +1533,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ? VMW_RES_DIRTY_SET : 0; - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - dirty, user_surface_converter, - &cmd->body.host.sid, NULL); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty, + user_surface_converter, &cmd->body.host.sid, + NULL); if (unlikely(ret != 0)) { if (unlikely(ret != -ERESTARTSYS)) VMW_DEBUG_USER("could not find surface for DMA.\n"); return ret; } - srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); + res = sw_context->res_cache[vmw_res_surface].res; + if (!res) { + VMW_DEBUG_USER("Invalid DMA surface.\n"); + return -EINVAL; + } - vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header); + srf = vmw_res_to_srf(res); + vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, + header); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index 7ee93e7191c7..35dc94c3db39 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -308,8 +308,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx, hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key); } node->res = vmw_resource_reference_unless_doomed(res); - if (!node->res) + if (!node->res) { + hash_del_rcu(&node->hash.head); return -ESRCH; + } node->first_usage = 1; if (!res->dev_priv->has_mob) { @@ -636,7 +638,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx) hash_del_rcu(&val->hash.head); list_for_each_entry(val, &ctx->resource_ctx_list, head) - hash_del_rcu(&entry->hash.head); + hash_del_rcu(&val->hash.head); ctx->sw_context = NULL; } diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 06cb6b02ec64..51f2a03847f9 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -342,6 +342,7 @@ #define POWERGATE_ENABLE XE_REG(0xa210) #define
RENDER_POWERGATE_ENABLE REG_BIT(0) #define MEDIA_POWERGATE_ENABLE REG_BIT(1) +#define MEDIA_SAMPLERS_POWERGATE_ENABLE REG_BIT(2) #define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n)) #define VDN_MFXVDENC_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n)) diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index 49b37dfd4e58..663a79ec960d 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -66,6 +66,7 @@ KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc); /** * xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters + * @test: test context object * @prev: the pointer to the previous parameter to iterate from or NULL * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE * @@ -211,15 +212,15 @@ static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc) * param generator can be used for both */ static const struct xe_ip pre_gmdid_graphics_ips[] = { - graphics_ip_xelp, - graphics_ip_xelpp, - graphics_ip_xehpg, - graphics_ip_xehpc, + { 1200, "Xe_LP", &graphics_xelp }, + { 1210, "Xe_LP+", &graphics_xelp }, + { 1255, "Xe_HPG", &graphics_xehpg }, + { 1260, "Xe_HPC", &graphics_xehpc }, }; static const struct xe_ip pre_gmdid_media_ips[] = { - media_ip_xem, - media_ip_xehpm, + { 1200, "Xe_M", &media_xem }, + { 1255, "Xe_HPM", &media_xem }, }; KUNIT_ARRAY_PARAM(pre_gmdid_graphics_ip, pre_gmdid_graphics_ips, xe_ip_kunit_desc); @@ -242,6 +243,7 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc); /** * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters + * @test: test context object * @prev: the pointer to the previous parameter to iterate from or NULL * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE * @@ -266,6 +268,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param); /** * xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters + * @test: test context object * @prev: the pointer to the previous parameter to iterate from or NULL * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE * @@ -290,6 +293,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param); /** * xe_pci_id_gen_param - Generate struct pci_device_id parameters + * @test: test context object * @prev: the pointer to the previous parameter to iterate from or NULL * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE * @@ -376,6 +380,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init); /** * xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters + * @test: test context object * @prev: the previously returned value, or NULL for the first iteration * @desc: the buffer for a parameter name * diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 8422f3cab113..4410e28dee54 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -1737,6 +1737,24 @@ static bool should_migrate_to_smem(struct xe_bo *bo) bo->attr.atomic_access == DRM_XE_ATOMIC_CPU; } +static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx) +{ + long lerr; + + if (ctx->no_wait_gpu) + return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ? + 0 : -EBUSY; + + lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, + ctx->interruptible, MAX_SCHEDULE_TIMEOUT); + if (lerr < 0) + return lerr; + if (lerr == 0) + return -EBUSY; + + return 0; +} + /* Populate the bo if swapped out, or migrate if the access mode requires that. 
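xe_bo_wait_usage_kernel() above folds the usual timeout-wait convention (negative errno on error, zero on timeout, positive on success) into the plain 0-or-negative-errno form callers want, with a non-blocking signalled test for the no_wait_gpu case. A runnable toy of just that conversion; fake_wait() stands in for dma_resv_wait_timeout() and everything here is illustrative:

#include <errno.h>
#include <stdio.h>

/* Toy of the errno conversion: timeout-style waiters return <0 on error,
 * 0 on timeout, >0 on success; callers want 0 or a negative errno.
 */
static long fake_wait(void)
{
        return 0;               /* pretend: timed out */
}

static int wait_to_errno(void)
{
        long l = fake_wait();

        if (l < 0)
                return (int)l;  /* e.g. -ERESTARTSYS if interruptible */
        if (l == 0)
                return -EBUSY;  /* nothing signalled within the timeout */
        return 0;
}

int main(void)
{
        printf("err = %d\n", wait_to_errno());  /* -EBUSY = -16 */
        return 0;
}

The no_wait_gpu leg in the real helper short-circuits to a signalled test instead of blocking; the conversion shown here covers only the blocking leg.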
*/ static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx, struct drm_exec *exec) @@ -1745,10 +1763,9 @@ static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx, int err = 0; if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) { - xe_assert(xe_bo_device(bo), - dma_resv_test_signaled(tbo->base.resv, DMA_RESV_USAGE_KERNEL) || - (tbo->ttm && ttm_tt_is_populated(tbo->ttm))); - err = ttm_bo_populate(&bo->ttm, ctx); + err = xe_bo_wait_usage_kernel(bo, ctx); + if (!err) + err = ttm_bo_populate(&bo->ttm, ctx); } else if (should_migrate_to_smem(bo)) { xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM); err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec); @@ -1922,7 +1939,6 @@ static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf) .no_wait_gpu = false, .gfp_retry_mayfail = retry_after_wait, }; - long lerr; err = drm_exec_lock_obj(&exec, &tbo->base); drm_exec_retry_on_contention(&exec); @@ -1942,13 +1958,9 @@ static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf) break; } - lerr = dma_resv_wait_timeout(tbo->base.resv, - DMA_RESV_USAGE_KERNEL, true, - MAX_SCHEDULE_TIMEOUT); - if (lerr < 0) { - err = lerr; + err = xe_bo_wait_usage_kernel(bo, &tctx); + if (err) break; - } if (!retry_after_wait) ret = __xe_bo_cpu_fault(vmf, xe, bo); diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index d5dbc51e8612..bc5b4c5fab81 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -182,7 +182,6 @@ int xe_bo_evict_all(struct xe_device *xe) static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo) { - struct xe_device *xe = xe_bo_device(bo); int ret; ret = xe_bo_restore_pinned(bo); @@ -201,13 +200,6 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo) } } - /* - * We expect validate to trigger a move VRAM and our move code - * should setup the iosys map. - */ - xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) || - !iosys_map_is_null(&bo->vmap)); - return 0; } diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index 8a9b950e7a6d..139663423185 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -126,8 +126,20 @@ * not intended for normal execution and will taint the kernel with TAINT_TEST * when used. * - * Currently this is implemented only for post and mid context restore. - * Examples: + * The syntax allows passing instructions straight to the engine to be executed + * in a batch buffer, or setting specific registers. + * + * #. Generic instruction:: + * + * <engine-class> cmd <instr> [[dword0] [dword1] [...]] + * + * #. Simple register setting:: + * + * <engine-class> reg <address> <value> + * + * Commands are saved per engine class: all instances of that class will execute + * those commands during context switch. The instruction, dword arguments, + * addresses and values are in hex format, as in the examples below. * * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f10 after the * normal context restore:: @@ -154,7 +166,8 @@ * When using multiple lines, make sure to use a command that is * implemented with a single write syscall, like HEREDOC. * - * These attributes can only be set before binding to the device. + * Currently this is implemented only for post and mid context restore, and + * these attributes can only be set before binding to the device.
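 *
 * As an illustration of the single-write requirement (the device path and
 * attribute name below are hypothetical, reusing the register and command
 * values from the examples above), one write(2) delivers the whole
 * multi-line script to the store callback at once::
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static const char script[] =
 *			"rcs reg 4f10 deadbeef\n"
 *			"rcs cmd 11000001 4f10 deadbeef\n";
 *		const ssize_t len = sizeof(script) - 1;
 *		int fd = open("/sys/kernel/config/xe/0000:00:02.0/ctx_restore_post_bb",
 *			      O_WRONLY);
 *
 *		if (fd < 0 || write(fd, script, len) != len)
 *			return 1;
 *		return close(fd);
 *	}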
* * Remove devices * ============== @@ -324,8 +337,8 @@ static const struct engine_info *lookup_engine_info(const char *pattern, u64 *ma continue; pattern += strlen(engine_info[i].cls); - if (!mask && !*pattern) - return &engine_info[i]; + if (!mask) + return *pattern ? NULL : &engine_info[i]; if (!strcmp(pattern, "*")) { *mask = engine_info[i].mask; diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index fdb7b7498920..34d33965eac2 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -685,16 +685,16 @@ static int wait_for_lmem_ready(struct xe_device *xe) } ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */ -static void sriov_update_device_info(struct xe_device *xe) +static void vf_update_device_info(struct xe_device *xe) { + xe_assert(xe, IS_SRIOV_VF(xe)); /* disable features that are not available/applicable to VFs */ - if (IS_SRIOV_VF(xe)) { - xe->info.probe_display = 0; - xe->info.has_heci_cscfi = 0; - xe->info.has_heci_gscfi = 0; - xe->info.skip_guc_pc = 1; - xe->info.skip_pcode = 1; - } + xe->info.probe_display = 0; + xe->info.has_heci_cscfi = 0; + xe->info.has_heci_gscfi = 0; + xe->info.has_late_bind = 0; + xe->info.skip_guc_pc = 1; + xe->info.skip_pcode = 1; } static int xe_device_vram_alloc(struct xe_device *xe) @@ -735,7 +735,8 @@ int xe_device_probe_early(struct xe_device *xe) xe_sriov_probe_early(xe); - sriov_update_device_info(xe); + if (IS_SRIOV_VF(xe)) + vf_update_device_info(xe); err = xe_pcode_probe_early(xe); if (err || xe_survivability_mode_is_requested(xe)) { @@ -1069,7 +1070,7 @@ void xe_device_l2_flush(struct xe_device *xe) spin_lock(>->global_invl_lock); xe_mmio_write32(>->mmio, XE2_GLOBAL_INVAL, 0x1); - if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) + if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); spin_unlock(>->global_invl_lock); diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index f8950a52d0a4..bdc9d9877ec4 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -124,6 +124,9 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt) if (xe_gt_is_main_type(gt)) gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE; + if (MEDIA_VERx100(xe) >= 1100 && MEDIA_VERx100(xe) < 1255) + gtidle->powergate_enable |= MEDIA_SAMPLERS_POWERGATE_ENABLE; + if (xe->info.platform != XE_DG1) { for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) { if ((gt->info.engine_mask & BIT(i))) @@ -246,6 +249,11 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p) drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n, str_up_down(pg_status & media_slices[n].status_bit)); } + + if (MEDIA_VERx100(xe) >= 1100 && MEDIA_VERx100(xe) < 1255) + drm_printf(p, "Media Samplers Power Gating Enabled: %s\n", + str_yes_no(pg_enabled & MEDIA_SAMPLERS_POWERGATE_ENABLE)); + return 0; } diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 53024eb5670b..94ed8159496f 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -44,6 +44,7 @@ #include "xe_ring_ops_types.h" #include "xe_sched_job.h" #include "xe_trace.h" +#include "xe_uc_fw.h" #include "xe_vm.h" static struct xe_guc * @@ -1489,7 +1490,17 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg) xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); 
trace_xe_exec_queue_cleanup_entity(q); - if (exec_queue_registered(q)) + /* + * Expected state transitions for cleanup: + * - If the exec queue is registered and GuC firmware is running, we must first + * disable scheduling and deregister the queue to ensure proper teardown and + * resource release in the GuC, then destroy the exec queue on driver side. + * - If the GuC is already stopped (e.g., during driver unload or GPU reset), + * we cannot expect a response for the deregister request. In this case, + * it is safe to directly destroy the exec queue on driver side, as the GuC + * will not process further requests and all resources must be cleaned up locally. + */ + if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw)) disable_scheduling_deregister(guc, q); else __guc_exec_queue_destroy(guc, q); diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c index 58bee3ffe881..fa4db5f23342 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_group.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c @@ -213,17 +213,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group err = q->ops->suspend_wait(q); if (err) - goto err_suspend; + return err; } if (need_resume) xe_hw_engine_group_resume_faulting_lr_jobs(group); return 0; - -err_suspend: - up_write(&group->mode_sem); - return err; } /** diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.c b/drivers/gpu/drm/xe/xe_late_bind_fw.c index 38f3feb2aecd..768442ca7da6 100644 --- a/drivers/gpu/drm/xe/xe_late_bind_fw.c +++ b/drivers/gpu/drm/xe/xe_late_bind_fw.c @@ -60,7 +60,7 @@ static int parse_cpd_header(struct xe_late_bind_fw *lb_fw, const struct gsc_manifest_header *manifest; const struct gsc_cpd_entry *entry; size_t min_size = sizeof(*header); - u32 offset; + u32 offset = 0; int i; /* manifest_entry is mandatory */ @@ -116,7 +116,7 @@ static int parse_lb_layout(struct xe_late_bind_fw *lb_fw, const struct csc_fpt_header *header = data; const struct csc_fpt_entry *entry; size_t min_size = sizeof(*header); - u32 offset; + u32 offset = 0; int i; /* fpt_entry is mandatory */ @@ -184,17 +184,13 @@ static const char *xe_late_bind_parse_status(uint32_t status) } } -static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind) +static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind, u32 *num_fans) { struct xe_device *xe = late_bind_to_xe(late_bind); struct xe_tile *root_tile = xe_device_get_root_tile(xe); - u32 uval; - if (!xe_pcode_read(root_tile, - PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), &uval, NULL)) - return uval; - else - return 0; + return xe_pcode_read(root_tile, + PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), num_fans, NULL); } void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind) @@ -314,7 +310,11 @@ static int __xe_late_bind_fw_init(struct xe_late_bind *late_bind, u32 fw_id) lb_fw->flags &= ~INTEL_LB_FLAG_IS_PERSISTENT; if (lb_fw->type == INTEL_LB_TYPE_FAN_CONTROL) { - num_fans = xe_late_bind_fw_num_fans(late_bind); + ret = xe_late_bind_fw_num_fans(late_bind, &num_fans); + if (ret) { + drm_dbg(&xe->drm, "Failed to read number of fans: %d\n", ret); + return 0; /* Not a fatal error, continue without fan control */ + } drm_dbg(&xe->drm, "Number of Fans: %d\n", num_fans); if (!num_fans) return 0; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 1d667fa36cf3..a36ce7dce8cc 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -434,7 +434,7 @@ int 
xe_migrate_init(struct xe_migrate *m) err = xe_migrate_lock_prepare_vm(tile, m, vm); if (err) - return err; + goto err_out; if (xe->info.has_usm) { struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt, @@ -2113,7 +2113,9 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, if (current_bytes & ~PAGE_MASK) { int pitch = 4; - current_bytes = min_t(int, current_bytes, S16_MAX * pitch); + current_bytes = min_t(int, current_bytes, + round_down(S16_MAX * pitch, + XE_CACHELINE_BYTES)); } __fence = xe_migrate_vram(m, current_bytes, diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index be91343829dd..9a6df79fc5b6 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -867,6 +867,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; + xe_vram_resize_bar(xe); + err = xe_device_probe_early(xe); /* * In Boot Survivability mode, no drm card is exposed and driver diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index d6625c71115b..2c5a44377994 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -201,7 +201,7 @@ int xe_pm_resume(struct xe_device *xe) if (err) goto err; - xe_i2c_pm_resume(xe, xe->d3cold.allowed); + xe_i2c_pm_resume(xe, true); xe_irq_resume(xe); diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index e1b603aba61b..2e9ff33ed2fe 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -276,8 +276,7 @@ static int query_mem_regions(struct xe_device *xe, mem_regions->mem_regions[0].instance = 0; mem_regions->mem_regions[0].min_page_size = PAGE_SIZE; mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT; - if (perfmon_capable()) - mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); + mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); mem_regions->num_mem_regions = 1; for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) { @@ -293,13 +292,11 @@ static int query_mem_regions(struct xe_device *xe, mem_regions->mem_regions[mem_regions->num_mem_regions].total_size = man->size; - if (perfmon_capable()) { - xe_ttm_vram_get_used(man, - &mem_regions->mem_regions - [mem_regions->num_mem_regions].used, - &mem_regions->mem_regions - [mem_regions->num_mem_regions].cpu_visible_used); - } + xe_ttm_vram_get_used(man, + &mem_regions->mem_regions + [mem_regions->num_mem_regions].used, + &mem_regions->mem_regions + [mem_regions->num_mem_regions].cpu_visible_used); mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size = xe_ttm_vram_get_cpu_visible_size(man); diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 7f2f1f041f1d..da2a412f80c0 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -67,11 +67,6 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation) range_debug(range, operation); } -static void *xe_svm_devm_owner(struct xe_device *xe) -{ - return xe; -} - static struct drm_gpusvm_range * xe_svm_range_alloc(struct drm_gpusvm *gpusvm) { @@ -744,15 +739,14 @@ int xe_svm_init(struct xe_vm *vm) xe_svm_garbage_collector_work_func); err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm, - current->mm, xe_svm_devm_owner(vm->xe), 0, - vm->size, + current->mm, 0, vm->size, xe_modparam.svm_notifier_size * SZ_1M, &gpusvm_ops, fault_chunk_sizes, ARRAY_SIZE(fault_chunk_sizes)); drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock); } else { err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM 
(simple)", - &vm->xe->drm, NULL, NULL, 0, 0, 0, NULL, + &vm->xe->drm, NULL, 0, 0, 0, NULL, NULL, 0); } @@ -1017,6 +1011,7 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, .devmem_only = need_vram && devmem_possible, .timeslice_ms = need_vram && devmem_possible ? vm->xe->atomic_svm_timeslice_ms : 0, + .device_private_page_owner = xe_svm_devm_owner(vm->xe), }; struct xe_validation_ctx vctx; struct drm_exec exec; @@ -1039,6 +1034,9 @@ retry: if (err) return err; + dpagemap = xe_vma_resolve_pagemap(vma, tile); + if (!dpagemap && !ctx.devmem_only) + ctx.device_private_page_owner = NULL; range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx); if (IS_ERR(range)) @@ -1059,7 +1057,6 @@ retry: range_debug(range, "PAGE FAULT"); - dpagemap = xe_vma_resolve_pagemap(vma, tile); if (--migrate_try_count >= 0 && xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) { ktime_t migrate_start = xe_svm_stats_ktime_get(); @@ -1078,7 +1075,17 @@ retry: drm_dbg(&vm->xe->drm, "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n", vm->usm.asid, ERR_PTR(err)); - goto retry; + + /* + * In the devmem-only case, mixed mappings may + * be found. The get_pages function will fix + * these up to a single location, allowing the + * page fault handler to make forward progress. + */ + if (ctx.devmem_only) + goto get_pages; + else + goto retry; } else { drm_err(&vm->xe->drm, "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n", @@ -1088,6 +1095,7 @@ retry: } } +get_pages: get_pages_start = xe_svm_stats_ktime_get(); range_debug(range, "GET PAGES"); diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index cef6ee7d6fe3..0955d2ac8d74 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -6,6 +6,20 @@ #ifndef _XE_SVM_H_ #define _XE_SVM_H_ +struct xe_device; + +/** + * xe_svm_devm_owner() - Return the owner of device private memory + * @xe: The xe device. 
+ * + * Return: The owner of this device's device private memory to use in + * hmm_range_fault()- + */ +static inline void *xe_svm_devm_owner(struct xe_device *xe) +{ + return xe; +} + #if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) #include <drm/drm_pagemap.h> diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c index 91d09af71ced..f16e92cd8090 100644 --- a/drivers/gpu/drm/xe/xe_userptr.c +++ b/drivers/gpu/drm/xe/xe_userptr.c @@ -54,6 +54,7 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) struct xe_device *xe = vm->xe; struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma), + .device_private_page_owner = NULL, }; lockdep_assert_held(&vm->lock); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 0cacab20ff85..f602b874e054 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2832,7 +2832,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, } static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma, - bool validate) + bool res_evict, bool validate) { struct xe_bo *bo = xe_vma_bo(vma); struct xe_vm *vm = xe_vma_vm(vma); @@ -2843,7 +2843,8 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma, err = drm_exec_lock_obj(exec, &bo->ttm.base); if (!err && validate) err = xe_bo_validate(bo, vm, - !xe_vm_in_preempt_fence_mode(vm), exec); + !xe_vm_in_preempt_fence_mode(vm) && + res_evict, exec); } return err; @@ -2881,6 +2882,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op) ctx.read_only = xe_vma_read_only(vma); ctx.devmem_possible = devmem_possible; ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0; + ctx.device_private_page_owner = xe_svm_devm_owner(vm->xe); /* TODO: Threading the migration */ xa_for_each(&op->prefetch_range.range, i, svm_range) { @@ -2912,14 +2914,23 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op) } static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, - struct xe_vma_op *op) + struct xe_vma_ops *vops, struct xe_vma_op *op) { int err = 0; + bool res_evict; + + /* + * We only allow evicting a BO within the VM if it is not part of an + * array of binds, as an array of binds can evict another BO within the + * bind. 
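Completing the thought in the comment above: eviction is gated by a single bit on the ops container, set only when one ioctl carries multiple binds, because letting bind[1] evict to make room could push out the BO that bind[0] just validated. A runnable toy of the derivation (the bit position matches XE_VMA_OPS_ARRAY_OF_BINDS defined later in this patch; the rest is invented):

#include <stdio.h>

/* Toy of the res_evict derivation: one flags word on the ops container
 * decides whether validation may evict other BOs in the VM.
 */
#define ARRAY_OF_BINDS  (1u << 2)

static int may_evict(unsigned int ops_flags)
{
        return !(ops_flags & ARRAY_OF_BINDS);
}

int main(void)
{
        printf("single bind: %d\n", may_evict(0));              /* 1 */
        printf("bind array:  %d\n", may_evict(ARRAY_OF_BINDS)); /* 0 */
        return 0;
}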
+ */ + res_evict = !(vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS); switch (op->base.op) { case DRM_GPUVA_OP_MAP: if (!op->map.invalidate_on_bind) err = vma_lock_and_validate(exec, op->map.vma, + res_evict, !xe_vm_in_fault_mode(vm) || op->map.immediate); break; @@ -2930,11 +2941,13 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, err = vma_lock_and_validate(exec, gpuva_to_vma(op->base.remap.unmap->va), - false); + res_evict, false); if (!err && op->remap.prev) - err = vma_lock_and_validate(exec, op->remap.prev, true); + err = vma_lock_and_validate(exec, op->remap.prev, + res_evict, true); if (!err && op->remap.next) - err = vma_lock_and_validate(exec, op->remap.next, true); + err = vma_lock_and_validate(exec, op->remap.next, + res_evict, true); break; case DRM_GPUVA_OP_UNMAP: err = check_ufence(gpuva_to_vma(op->base.unmap.va)); @@ -2943,7 +2956,7 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, err = vma_lock_and_validate(exec, gpuva_to_vma(op->base.unmap.va), - false); + res_evict, false); break; case DRM_GPUVA_OP_PREFETCH: { @@ -2958,7 +2971,7 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, err = vma_lock_and_validate(exec, gpuva_to_vma(op->base.prefetch.va), - false); + res_evict, false); if (!err && !xe_vma_has_no_bo(vma)) err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region], @@ -3004,7 +3017,7 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, return err; list_for_each_entry(op, &vops->list, link) { - err = op_lock_and_prep(exec, vm, op); + err = op_lock_and_prep(exec, vm, vops, op); if (err) return err; } @@ -3637,6 +3650,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) } xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); + if (args->num_binds > 1) + vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS; for (i = 0; i < args->num_binds; ++i) { u64 range = bind_ops[i].range; u64 addr = bind_ops[i].addr; diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index da39940501d8..413353e1c225 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -476,6 +476,7 @@ struct xe_vma_ops { /** @flag: signify the properties within xe_vma_ops*/ #define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0) #define XE_VMA_OPS_FLAG_MADVISE BIT(1) +#define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2) u32 flags; #ifdef TEST_VM_OPS_ERROR /** @inject_error: inject error to test error handling */ diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index b44ebf50fedb..652df7a5f4f6 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -26,15 +26,35 @@ #define BAR_SIZE_SHIFT 20 -static void -_resize_bar(struct xe_device *xe, int resno, resource_size_t size) +/* + * Release all the BARs that could influence/block LMEMBAR resizing, i.e. 
+ * assigned IORESOURCE_MEM_64 BARs + */ +static void release_bars(struct pci_dev *pdev) +{ + struct resource *res; + int i; + + pci_dev_for_each_resource(pdev, res, i) { + /* Resource already un-assigned, do not reset it */ + if (!res->parent) + continue; + + /* No need to release unrelated BARs */ + if (!(res->flags & IORESOURCE_MEM_64)) + continue; + + pci_release_resource(pdev, i); + } +} + +static void resize_bar(struct xe_device *xe, int resno, resource_size_t size) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); int bar_size = pci_rebar_bytes_to_size(size); int ret; - if (pci_resource_len(pdev, resno)) - pci_release_resource(pdev, resno); + release_bars(pdev); ret = pci_resize_resource(pdev, resno, bar_size); if (ret) { @@ -50,7 +70,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size) * if force_vram_bar_size is set, attempt to set to the requested size * else set to maximum possible size */ -static void resize_vram_bar(struct xe_device *xe) +void xe_vram_resize_bar(struct xe_device *xe) { int force_vram_bar_size = xe_modparam.force_vram_bar_size; struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -119,7 +139,7 @@ static void resize_vram_bar(struct xe_device *xe) pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd); pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_MEMORY); - _resize_bar(xe, LMEM_BAR, rebar_size); + resize_bar(xe, LMEM_BAR, rebar_size); pci_assign_unassigned_bus_resources(pdev->bus); pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd); @@ -148,8 +168,6 @@ static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region * return -ENXIO; } - resize_vram_bar(xe); - lmem_bar->io_start = pci_resource_start(pdev, LMEM_BAR); lmem_bar->io_size = pci_resource_len(pdev, LMEM_BAR); if (!lmem_bar->io_size) diff --git a/drivers/gpu/drm/xe/xe_vram.h b/drivers/gpu/drm/xe/xe_vram.h index 72860f714fc6..13505cfb184d 100644 --- a/drivers/gpu/drm/xe/xe_vram.h +++ b/drivers/gpu/drm/xe/xe_vram.h @@ -11,6 +11,7 @@ struct xe_device; struct xe_vram_region; +void xe_vram_resize_bar(struct xe_device *xe); int xe_vram_probe(struct xe_device *xe); struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 5341aa79f387..04420a713be0 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -93,7 +93,7 @@ config HID_GENERIC If unsure, say Y. 
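Returning to the xe_vram.c hunk above: resizing the LMEM BAR only succeeds if every assigned 64-bit BAR is dropped first, since the bridge window may need to move, and memory decode must be off across the operation. A condensed sketch of that sequence using the same PCI helpers the hunk calls (error handling and locking elided; this shows the shape, not the driver code):

#include <linux/pci.h>

static void lmem_bar_resize_sketch(struct pci_dev *pdev, int resno,
                                   int bar_size)
{
        struct resource *res;
        u32 cmd;
        int i;

        /* stop memory decode while BARs are torn down */
        pci_read_config_dword(pdev, PCI_COMMAND, &cmd);
        pci_write_config_dword(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY);

        /* release every assigned 64-bit BAR, not just the one resized */
        pci_dev_for_each_resource(pdev, res, i)
                if (res->parent && (res->flags & IORESOURCE_MEM_64))
                        pci_release_resource(pdev, i);

        pci_resize_resource(pdev, resno, bar_size);
        pci_assign_unassigned_bus_resources(pdev->bus);

        /* restore the original command register, re-enabling decode */
        pci_write_config_dword(pdev, PCI_COMMAND, cmd);
}

Releasing all IORESOURCE_MEM_64 BARs rather than only the LMEM BAR is the behavioral change the hunk introduces; the decode toggle around the resize was already there.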
config HID_HAPTIC - tristate "Haptic touchpad support" + bool "Haptic touchpad support" default n help Support for touchpads with force sensors and haptic actuators instead of a diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 5a95ea3bec98..803b883ae875 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -689,7 +689,14 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); } else { - count = cp2112_write_req(buf, addr, command, + /* Copy starts from data->block[1] so the length can + * be at most I2C_SMBUS_BLOCK_MAX + 1 + */ + + if (data->block[0] > I2C_SMBUS_BLOCK_MAX + 1) + count = -EINVAL; + else + count = cp2112_write_req(buf, addr, command, data->block + 1, data->block[0]); } @@ -700,7 +707,14 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, I2C_SMBUS_BLOCK_MAX, command, NULL, 0); } else { - count = cp2112_write_req(buf, addr, command, + /* data_length here is data->block[0] + 1, + * so make sure that data->block[0] is + * less than or equal to I2C_SMBUS_BLOCK_MAX + 1 + */ + if (data->block[0] > I2C_SMBUS_BLOCK_MAX + 1) + count = -EINVAL; + else + count = cp2112_write_req(buf, addr, command, data->block, data->block[0] + 1); } @@ -709,7 +723,14 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, size = I2C_SMBUS_BLOCK_DATA; read_write = I2C_SMBUS_READ; - count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, + /* data_length is data->block[0] + 1, so + * data->block[0] should be less than or + * equal to I2C_SMBUS_BLOCK_MAX + 1 + */ + if (data->block[0] > I2C_SMBUS_BLOCK_MAX + 1) + count = -EINVAL; + else + count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, data->block, data->block[0] + 1); break; diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 7107071c7c51..337d2dc81b4c 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -2523,7 +2523,7 @@ static const struct hid_usage_entry hid_usage_table[] = { { 0x85, 0x0088, "iDeviceName" }, { 0x85, 0x0089, "iDeviceChemistry" }, { 0x85, 0x008a, "ManufacturerData" }, - { 0x85, 0x008b, "Rechargable" }, + { 0x85, 0x008b, "Rechargeable" }, { 0x85, 0x008c, "WarningCapacityLimit" }, { 0x85, 0x008d, "CapacityGranularity1" }, { 0x85, 0x008e, "CapacityGranularity2" }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5721b8414bbd..0723b4b1c9ec 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -342,6 +342,9 @@ #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500 #define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff +#define USB_VENDOR_ID_COOLER_MASTER 0x2516 +#define USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE 0x01b7 + #define USB_VENDOR_ID_CORSAIR 0x1b1c #define USB_DEVICE_ID_CORSAIR_K90 0x1b02 #define USB_DEVICE_ID_CORSAIR_K70R 0x1b09 @@ -1432,6 +1435,7 @@ #define USB_VENDOR_ID_VRS 0x0483 #define USB_DEVICE_ID_VRS_DFP 0xa355 +#define USB_DEVICE_ID_VRS_R295 0xa44c #define USB_VENDOR_ID_VTL 0x0306 #define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 5d7532d79d21..e56e7de53279 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -635,7 +635,10 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage, return; } - if (value == 0 || value < dev->battery_min || value > dev->battery_max) + if ((usage & HID_USAGE_PAGE) == HID_UP_DIGITIZER && value == 0) + return; + + if (value < dev->battery_min || value > dev->battery_max)
return; capacity = hidinput_scale_battery_capacity(dev, value); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index aaef405a717e..5e763de4b94f 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -75,6 +75,7 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(27) #define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(28) #define HIDPP_QUIRK_WIRELESS_STATUS BIT(29) +#define HIDPP_QUIRK_RESET_HI_RES_SCROLL BIT(30) /* These are just aliases for now */ #define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS @@ -193,6 +194,7 @@ struct hidpp_device { void *private_data; struct work_struct work; + struct work_struct reset_hi_res_work; struct kfifo delayed_work_fifo; struct input_dev *delayed_input; @@ -3836,6 +3838,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, struct hidpp_report *answer = hidpp->send_receive_buf; struct hidpp_report *report = (struct hidpp_report *)data; int ret; + int last_online; /* * If the mutex is locked then we have a pending answer from a @@ -3877,6 +3880,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, "See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n"); } + last_online = hidpp->battery.online; if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { ret = hidpp20_battery_event_1000(hidpp, data, size); if (ret != 0) @@ -3901,6 +3905,11 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, return ret; } + if (hidpp->quirks & HIDPP_QUIRK_RESET_HI_RES_SCROLL) { + if (last_online == 0 && hidpp->battery.online == 1) + schedule_work(&hidpp->reset_hi_res_work); + } + if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) { ret = hidpp10_wheel_raw_event(hidpp, data, size); if (ret != 0) @@ -4274,6 +4283,13 @@ static void hidpp_connect_event(struct work_struct *work) hidpp->delayed_input = input; } +static void hidpp_reset_hi_res_handler(struct work_struct *work) +{ + struct hidpp_device *hidpp = container_of(work, struct hidpp_device, reset_hi_res_work); + + hi_res_scroll_enable(hidpp); +} + static DEVICE_ATTR(builtin_power_supply, 0000, NULL, NULL); static struct attribute *sysfs_attrs[] = { @@ -4404,6 +4420,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) } INIT_WORK(&hidpp->work, hidpp_connect_event); + INIT_WORK(&hidpp->reset_hi_res_work, hidpp_reset_hi_res_handler); mutex_init(&hidpp->send_mutex); init_waitqueue_head(&hidpp->wait); @@ -4499,6 +4516,7 @@ static void hidpp_remove(struct hid_device *hdev) hid_hw_stop(hdev); cancel_work_sync(&hidpp->work); + cancel_work_sync(&hidpp->reset_hi_res_work); mutex_destroy(&hidpp->send_mutex); } @@ -4546,6 +4564,9 @@ static const struct hid_device_id hidpp_devices[] = { { /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* Logitech G502 Lightspeed Wireless Gaming Mouse */ + LDJ_DEVICE(0x407f), + .driver_data = HIDPP_QUIRK_RESET_HI_RES_SCROLL }, { LDJ_DEVICE(HID_ANY_ID) }, diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 2879e65cf303..179dc316b4b5 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -94,9 +94,8 @@ enum report_mode { TOUCHPAD_REPORT_ALL = TOUCHPAD_REPORT_BUTTONS | TOUCHPAD_REPORT_CONTACTS, }; -#define MT_IO_FLAGS_RUNNING 0 -#define MT_IO_FLAGS_ACTIVE_SLOTS 1 -#define MT_IO_FLAGS_PENDING_SLOTS 2 +#define MT_IO_SLOTS_MASK 
GENMASK(7, 0) /* reserve first 8 bits for slot tracking */ +#define MT_IO_FLAGS_RUNNING 32 static const bool mtrue = true; /* default for true */ static const bool mfalse; /* default for false */ @@ -172,7 +171,11 @@ struct mt_device { struct timer_list release_timer; /* to release sticky fingers */ struct hid_haptic_device *haptic; /* haptic related configuration */ struct hid_device *hdev; /* hid_device we're attached to */ - unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */ + unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_RUNNING) + * first 8 bits are reserved for keeping the slot + * states, this is fine because we only support up + * to 250 slots (MT_MAX_MAXCONTACT) + */ __u8 inputmode_value; /* InputMode HID feature value */ __u8 maxcontacts; bool is_buttonpad; /* is this device a button pad? */ @@ -986,6 +989,7 @@ static void mt_release_pending_palms(struct mt_device *td, for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) { clear_bit(slotnum, app->pending_palm_slots); + clear_bit(slotnum, &td->mt_io_flags); input_mt_slot(input, slotnum); input_mt_report_slot_inactive(input); @@ -1019,12 +1023,6 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app, app->left_button_state = 0; if (td->is_haptic_touchpad) hid_haptic_pressure_reset(td->haptic); - - if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags)) - set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); - else - clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); - clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); } static int mt_compute_timestamp(struct mt_application *app, __s32 value) @@ -1202,7 +1200,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input, input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); - set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); + set_bit(slotnum, &td->mt_io_flags); + } else { + clear_bit(slotnum, &td->mt_io_flags); } return 0; @@ -1337,7 +1337,7 @@ static void mt_touch_report(struct hid_device *hid, * defect. 
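The hid-multitouch rework above replaces the single ACTIVE/PENDING bit pair with one bit per slot in the low byte of mt_io_flags, so "is any contact still down?" becomes a mask test against MT_IO_SLOTS_MASK. A runnable userspace toy of that bookkeeping (the mask value matches the hunk; everything else is invented):

#include <stdio.h>

/* Toy of the mt_io_flags layout: bits 0..7 track which slots have an
 * active contact, higher bits hold flags such as MT_IO_FLAGS_RUNNING
 * (bit 32 in the driver).
 */
#define SLOTS_MASK      0xffUL          /* GENMASK(7, 0) */

int main(void)
{
        unsigned long io_flags = 0;

        io_flags |= 1UL << 3;           /* slot 3 touched */
        io_flags |= 1UL << 5;           /* slot 5 touched */
        io_flags &= ~(1UL << 3);        /* slot 3 released */

        /* "any contact still down?" replaces the old PENDING_SLOTS bit */
        printf("pending: %s\n", (io_flags & SLOTS_MASK) ? "yes" : "no");
        return 0;
}

Releasing a contact now clears its own bit, which is why mt_release_contacts() and the pending-palm path in this hunk gained clear_bit() calls.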
*/ if (app->quirks & MT_QUIRK_STICKY_FINGERS) { - if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) + if (td->mt_io_flags & MT_IO_SLOTS_MASK) mod_timer(&td->release_timer, jiffies + msecs_to_jiffies(100)); else @@ -1742,6 +1742,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) case HID_CP_CONSUMER_CONTROL: case HID_GD_WIRELESS_RADIO_CTLS: case HID_GD_SYSTEM_MULTIAXIS: + case HID_DG_PEN: /* already handled by hid core */ break; case HID_DG_TOUCHSCREEN: @@ -1813,6 +1814,7 @@ static void mt_release_contacts(struct hid_device *hid) for (i = 0; i < mt->num_slots; i++) { input_mt_slot(input_dev, i); input_mt_report_slot_inactive(input_dev); + clear_bit(i, &td->mt_io_flags); } input_mt_sync_frame(input_dev); input_sync(input_dev); @@ -1835,7 +1837,7 @@ static void mt_expired_timeout(struct timer_list *t) */ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; - if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) + if (td->mt_io_flags & MT_IO_SLOTS_MASK) mt_release_contacts(hdev); clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index fb4985988615..c2849a541f65 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -1455,10 +1455,10 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr, ctlr->imu_avg_delta_ms; ctlr->imu_timestamp_us += 1000 * ctlr->imu_avg_delta_ms; if (dropped_pkts > JC_IMU_DROPPED_PKT_WARNING) { - hid_warn(ctlr->hdev, + hid_warn_ratelimited(ctlr->hdev, "compensating for %u dropped IMU reports\n", dropped_pkts); - hid_warn(ctlr->hdev, + hid_warn_ratelimited(ctlr->hdev, "delta=%u avg_delta=%u\n", delta, ctlr->imu_avg_delta_ms); } @@ -2420,7 +2420,7 @@ static int joycon_read_info(struct joycon_ctlr *ctlr) struct joycon_input_report *report; req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO; - ret = joycon_send_subcmd(ctlr, &req, 0, HZ); + ret = joycon_send_subcmd(ctlr, &req, 0, 2 * HZ); if (ret) { hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret); return ret; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index ffd034566e2e..bcd4bccf1a7c 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -57,6 +57,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_COOLER_MASTER, USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS }, @@ -206,6 +207,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_R295), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, 
USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c index 8433a991e7f4..0156ab391778 100644 --- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c +++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c @@ -466,7 +466,7 @@ static void quicki2c_dma_adv_enable(struct quicki2c_device *qcdev) dev_warn(qcdev->dev, "Max frame size is smaller than hid max input length!"); thc_i2c_set_rx_max_size(qcdev->thc_hw, - le16_to_cpu(qcdev->i2c_max_frame_size)); + qcdev->i2c_max_frame_size); } thc_i2c_rx_max_size_enable(qcdev->thc_hw, true); } diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c index 84314989dc53..14cabd5dc6dd 100644 --- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c +++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c @@ -33,6 +33,10 @@ struct quickspi_driver_data ptl = { .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL, }; +struct quickspi_driver_data arl = { + .max_packet_size_value = MAX_PACKET_SIZE_VALUE_MTL, +}; + /* THC QuickSPI ACPI method to get device properties */ /* HIDSPI Method: {6e2ac436-0fcf-41af-a265-b32a220dcfab} */ static guid_t hidspi_guid = @@ -978,6 +982,8 @@ static const struct pci_device_id quickspi_pci_tbl[] = { {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), }, {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT1, &ptl), }, {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), }, + {PCI_DEVICE_DATA(INTEL, THC_ARL_DEVICE_ID_SPI_PORT1, &arl), }, + {PCI_DEVICE_DATA(INTEL, THC_ARL_DEVICE_ID_SPI_PORT2, &arl), }, {} }; MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl); diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h index f3532d866749..c30e1a42eb09 100644 --- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h +++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h @@ -21,6 +21,8 @@ #define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B #define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT1 0x4D49 #define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B +#define PCI_DEVICE_ID_INTEL_THC_ARL_DEVICE_ID_SPI_PORT1 0x7749 +#define PCI_DEVICE_ID_INTEL_THC_ARL_DEVICE_ID_SPI_PORT2 0x774B /* HIDSPI special ACPI parameters DSM methods */ #define ACPI_QUICKSPI_REVISION_NUM 2 diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c index e6ba2ddcc9cb..16f780bc879b 100644 --- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c +++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c @@ -280,8 +280,7 @@ int reset_tic(struct quickspi_device *qsdev) qsdev->reset_ack = false; - /* First interrupt uses level trigger to avoid missing interrupt */ - thc_int_trigger_type_select(qsdev->thc_hw, false); + thc_int_trigger_type_select(qsdev->thc_hw, true); ret = acpi_tic_reset(qsdev); if (ret) diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 74b66aec33d4..ee86df4cff4b 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -30,7 +30,7 @@ 
static int i2c_debug; #define pca_clock(adap) adap->i2c_clock #define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val) #define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON) -#define pca_wait(adap) adap->wait_for_completion(adap->data) +#define pca_wait(adap) adap->wait_for_completion_cb(adap->data) static void pca_reset(struct i2c_algo_pca_data *adap) { diff --git a/drivers/i2c/busses/i2c-amd-mp2.h b/drivers/i2c/busses/i2c-amd-mp2.h index 018a42de8b1e..9b7e9494dd12 100644 --- a/drivers/i2c/busses/i2c-amd-mp2.h +++ b/drivers/i2c/busses/i2c-amd-mp2.h @@ -207,7 +207,6 @@ static inline void amd_mp2_pm_runtime_get(struct amd_mp2_dev *mp2_dev) static inline void amd_mp2_pm_runtime_put(struct amd_mp2_dev *mp2_dev) { - pm_runtime_mark_last_busy(&mp2_dev->pci_dev->dev); pm_runtime_put_autosuspend(&mp2_dev->pci_dev->dev); } diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index edc047e3e535..b64adef778d4 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -313,7 +313,6 @@ static int __maybe_unused at91_twi_resume_noirq(struct device *dev) return ret; } - pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); at91_init_twi_bus(twi_dev); diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index 59795c1c24ff..894cedbca99f 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -717,7 +717,6 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) ret = (ret < 0) ? ret : num; out: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 697d095afbe4..0fb728ade92e 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -1128,7 +1128,6 @@ out: cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); #endif - pm_runtime_mark_last_busy(id->dev); pm_runtime_put_autosuspend(id->dev); return ret; } diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 6a3d4e9e07f4..a773ba082321 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -543,7 +543,6 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) ret = num; out: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; @@ -821,7 +820,6 @@ static int davinci_i2c_probe(struct platform_device *pdev) if (r) goto err_unuse_clocks; - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index c7a72c28786c..41e9b5ecad20 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -901,7 +901,6 @@ done: i2c_dw_release_lock(dev); done_nolock: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 5358f5ddf924..95ab910b80c0 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -373,7 +373,6 @@ static int hix5hd2_i2c_xfer(struct i2c_adapter *adap, ret = num; out: - pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); return ret; } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index cba992fa6557..57fbec1259be 100644 --- 
a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -930,7 +930,6 @@ out: */ iowrite8(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv)); - pm_runtime_mark_last_busy(&priv->pci_dev->dev); pm_runtime_put_autosuspend(&priv->pci_dev->dev); return ret; } diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index a454f9f25146..88192c25c44c 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1131,7 +1131,6 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, break; } - pm_runtime_mark_last_busy(adap->dev.parent); pm_runtime_put_autosuspend(adap->dev.parent); return i2c->msg_status ? i2c->msg_status : num; @@ -1165,7 +1164,6 @@ static int img_i2c_init(struct img_i2c *i2c) "Unknown hardware revision (%d.%d.%d.%d)\n", (rev >> 24) & 0xff, (rev >> 16) & 0xff, (rev >> 8) & 0xff, rev & 0xff); - pm_runtime_mark_last_busy(i2c->adap.dev.parent); pm_runtime_put_autosuspend(i2c->adap.dev.parent); return -EINVAL; } @@ -1317,7 +1315,6 @@ static int img_i2c_init(struct img_i2c *i2c) /* Perform a synchronous sequence to reset the bus */ ret = img_i2c_reset_bus(i2c); - pm_runtime_mark_last_busy(i2c->adap.dev.parent); pm_runtime_put_autosuspend(i2c->adap.dev.parent); return ret; diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 03b5a7e8c361..2a0962a0b441 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -363,7 +363,6 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx) return 0; rpm_put: - pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent); pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent); return ret; @@ -377,7 +376,6 @@ static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx) temp &= ~MCR_MEN; writel(temp, lpi2c_imx->base + LPI2C_MCR); - pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent); pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent); return 0; @@ -1462,7 +1460,6 @@ static int lpi2c_imx_probe(struct platform_device *pdev) if (ret) goto rpm_disable; - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n"); @@ -1564,7 +1561,6 @@ static int lpi2c_suspend(struct device *dev) static int lpi2c_resume(struct device *dev) { - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 60f5c790ad7c..dcce882f3eba 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1637,7 +1637,6 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, result = i2c_imx_xfer_common(adapter, msgs, num, false); - pm_runtime_mark_last_busy(i2c_imx->adapter.dev.parent); pm_runtime_put_autosuspend(i2c_imx->adapter.dev.parent); return result; @@ -1822,7 +1821,6 @@ static int i2c_imx_probe(struct platform_device *pdev) if (ret < 0) goto clk_notifier_unregister; - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq); @@ -1928,7 +1926,6 @@ static int i2c_imx_suspend(struct device *dev) static int i2c_imx_resume(struct device *dev) { - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index dee40704825c..aefdbee1f03c 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ 
-868,7 +868,7 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src, return 0; } -static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk) +static void mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk) { unsigned int clk_src; unsigned int step_cnt; @@ -938,9 +938,6 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk) break; } - - - return 0; } static void i2c_dump_register(struct mtk_i2c *i2c) @@ -1460,11 +1457,7 @@ static int mtk_i2c_probe(struct platform_device *pdev) strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name)); - ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk)); - if (ret) { - dev_err(&pdev->dev, "Failed to set the speed.\n"); - return -EINVAL; - } + mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk)); if (i2c->dev_comp->max_dma_support > 32) { ret = dma_set_mask(&pdev->dev, diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 8fc26a511320..1acba628e16c 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -766,7 +766,6 @@ mv64xxx_i2c_xfer_core(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) drv_data->num_msgs = 0; drv_data->msgs = NULL; - pm_runtime_mark_last_busy(&adap->dev); pm_runtime_put_autosuspend(&adap->dev); return ret; diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index 541d808d62d0..14c059b03945 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -216,7 +216,6 @@ exit: if (status2 < 0) dev_err(i2cd->dev, "i2c stop failed %d\n", status2); } - pm_runtime_mark_last_busy(i2cd->dev); pm_runtime_put_autosuspend(i2cd->dev); return status; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 5fcc9f6c33e5..d9f590f0c384 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -828,7 +828,6 @@ omap_i2c_xfer_common(struct i2c_adapter *adap, struct i2c_msg msgs[], int num, omap->set_mpu_wkup_lat(omap->dev, -1); out: - pm_runtime_mark_last_busy(omap->dev); pm_runtime_put_autosuspend(omap->dev); return r; } @@ -1510,7 +1509,6 @@ omap_i2c_probe(struct platform_device *pdev) dev_info(omap->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr, major, minor, omap->speed); - pm_runtime_mark_last_busy(omap->dev); pm_runtime_put_autosuspend(omap->dev); return 0; @@ -1605,7 +1603,6 @@ static int omap_i2c_suspend(struct device *dev) static int omap_i2c_resume(struct device *dev) { - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c index 85e8cf58e8bf..0cbf2f509527 100644 --- a/drivers/i2c/busses/i2c-pca-isa.c +++ b/drivers/i2c/busses/i2c-pca-isa.c @@ -95,7 +95,7 @@ static struct i2c_algo_pca_data pca_isa_data = { /* .data intentionally left NULL, not needed with ISA */ .write_byte = pca_isa_writebyte, .read_byte = pca_isa_readbyte, - .wait_for_completion = pca_isa_waitforcompletion, + .wait_for_completion_cb = pca_isa_waitforcompletion, .reset_chip = pca_isa_resetchip, }; diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index 87da8241b927..c0f35ebbe37d 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -180,7 +180,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) } i2c->algo_data.data = i2c; - i2c->algo_data.wait_for_completion = 
i2c_pca_pf_waitforcompletion; + i2c->algo_data.wait_for_completion_cb = i2c_pca_pf_waitforcompletion; if (i2c->gpio) i2c->algo_data.reset_chip = i2c_pca_pf_resetchip; else diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index a3afa11a71a1..e631d79baf14 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -450,7 +450,6 @@ static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) ret = num; err: - pm_runtime_mark_last_busy(cci->dev); pm_runtime_put_autosuspend(cci->dev); return ret; @@ -508,7 +507,6 @@ static int __maybe_unused cci_suspend(struct device *dev) static int __maybe_unused cci_resume(struct device *dev) { cci_resume_runtime(dev); - pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); return 0; diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 95a577764d5c..43fdd89b8beb 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -714,7 +714,6 @@ static int geni_i2c_xfer(struct i2c_adapter *adap, else ret = geni_i2c_fifo_xfer(gi2c, msgs, num); - pm_runtime_mark_last_busy(gi2c->se.dev); pm_runtime_put_autosuspend(gi2c->se.dev); gi2c->cur = NULL; gi2c->err = 0; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index fc348924d522..a0e076fc5f36 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1139,7 +1139,6 @@ static int qup_i2c_xfer(struct i2c_adapter *adap, ret = num; out: - pm_runtime_mark_last_busy(qup->dev); pm_runtime_put_autosuspend(qup->dev); return ret; @@ -1624,7 +1623,6 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap, if (ret == 0) ret = num; out: - pm_runtime_mark_last_busy(qup->dev); pm_runtime_put_autosuspend(qup->dev); return ret; @@ -1991,7 +1989,6 @@ static int qup_i2c_suspend(struct device *device) static int qup_i2c_resume(struct device *device) { qup_i2c_pm_resume_runtime(device); - pm_runtime_mark_last_busy(device); pm_request_autosuspend(device); return 0; } diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index b0ee9ac45a97..3e8f126cb7f7 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -206,7 +206,6 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) } out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return riic->err ?: num; @@ -452,7 +451,6 @@ static int riic_init_hw(struct riic_dev *riic) riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; } diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c index af991b28e4f8..4723e48cfe18 100644 --- a/drivers/i2c/busses/i2c-rtl9300.c +++ b/drivers/i2c/busses/i2c-rtl9300.c @@ -8,6 +8,7 @@ #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <linux/unaligned.h> enum rtl9300_bus_freq { RTL9300_I2C_STD_FREQ, @@ -20,103 +21,143 @@ struct rtl9300_i2c_chan { struct i2c_adapter adap; struct rtl9300_i2c *i2c; enum rtl9300_bus_freq bus_freq; - u8 sda_pin; + u8 sda_num; +}; + +enum rtl9300_i2c_reg_scope { + REG_SCOPE_GLOBAL, + REG_SCOPE_MASTER, +}; + +struct rtl9300_i2c_reg_field { + struct reg_field field; + enum rtl9300_i2c_reg_scope scope; +}; + +enum rtl9300_i2c_reg_fields { + F_DATA_WIDTH = 0, + F_DEV_ADDR, + F_I2C_FAIL, + F_I2C_TRIG, + F_MEM_ADDR, + F_MEM_ADDR_WIDTH, + F_RD_MODE, + F_RWOP, + F_SCL_FREQ, + F_SCL_SEL, + F_SDA_OUT_SEL, + 
F_SDA_SEL, + + /* keep last */ + F_NUM_FIELDS +}; + +struct rtl9300_i2c_drv_data { + struct rtl9300_i2c_reg_field field_desc[F_NUM_FIELDS]; + int (*select_scl)(struct rtl9300_i2c *i2c, u8 scl); + u32 data_reg; + u8 max_nchan; }; #define RTL9300_I2C_MUX_NCHAN 8 +#define RTL9310_I2C_MUX_NCHAN 12 struct rtl9300_i2c { struct regmap *regmap; struct device *dev; - struct rtl9300_i2c_chan chans[RTL9300_I2C_MUX_NCHAN]; + struct rtl9300_i2c_chan chans[RTL9310_I2C_MUX_NCHAN]; + struct regmap_field *fields[F_NUM_FIELDS]; u32 reg_base; - u8 sda_pin; + u32 data_reg; + u8 scl_num; + u8 sda_num; struct mutex lock; }; +DEFINE_GUARD(rtl9300_i2c, struct rtl9300_i2c *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock)) + +enum rtl9300_i2c_xfer_type { + RTL9300_I2C_XFER_BYTE, + RTL9300_I2C_XFER_WORD, + RTL9300_I2C_XFER_BLOCK, +}; + +struct rtl9300_i2c_xfer { + enum rtl9300_i2c_xfer_type type; + u16 dev_addr; + u8 reg_addr; + u8 reg_addr_len; + u8 *data; + u8 data_len; + bool write; +}; + #define RTL9300_I2C_MST_CTRL1 0x0 -#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS 8 -#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK GENMASK(31, 8) -#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS 4 -#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK GENMASK(6, 4) -#define RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL BIT(3) -#define RTL9300_I2C_MST_CTRL1_RWOP BIT(2) -#define RTL9300_I2C_MST_CTRL1_I2C_FAIL BIT(1) -#define RTL9300_I2C_MST_CTRL1_I2C_TRIG BIT(0) #define RTL9300_I2C_MST_CTRL2 0x4 -#define RTL9300_I2C_MST_CTRL2_RD_MODE BIT(15) -#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS 8 -#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK GENMASK(14, 8) -#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS 4 -#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK GENMASK(7, 4) -#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS 2 -#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK GENMASK(3, 2) -#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS 0 -#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK GENMASK(1, 0) #define RTL9300_I2C_MST_DATA_WORD0 0x8 #define RTL9300_I2C_MST_DATA_WORD1 0xc #define RTL9300_I2C_MST_DATA_WORD2 0x10 #define RTL9300_I2C_MST_DATA_WORD3 0x14 - #define RTL9300_I2C_MST_GLB_CTRL 0x384 +#define RTL9310_I2C_MST_IF_CTRL 0x1004 +#define RTL9310_I2C_MST_IF_SEL 0x1008 +#define RTL9310_I2C_MST_CTRL 0x0 +#define RTL9310_I2C_MST_MEMADDR_CTRL 0x4 +#define RTL9310_I2C_MST_DATA_CTRL 0x8 + static int rtl9300_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len) { - u32 val, mask; int ret; - val = len << RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS; - mask = RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK; - - ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val); + ret = regmap_field_write(i2c->fields[F_MEM_ADDR_WIDTH], len); if (ret) return ret; - val = reg << RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS; - mask = RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK; - - return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); + return regmap_field_write(i2c->fields[F_MEM_ADDR], reg); } -static int rtl9300_i2c_config_io(struct rtl9300_i2c *i2c, u8 sda_pin) +static int rtl9300_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl) { - int ret; - u32 val, mask; - - ret = regmap_update_bits(i2c->regmap, RTL9300_I2C_MST_GLB_CTRL, BIT(sda_pin), BIT(sda_pin)); - if (ret) - return ret; - - val = (sda_pin << RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS) | - RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL; - mask = RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK | RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL; + return regmap_field_write(i2c->fields[F_SCL_SEL], 1); +} - return 
regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); +static int rtl9310_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl) +{ + return regmap_field_update_bits(i2c->fields[F_SCL_SEL], BIT(scl), BIT(scl)); } -static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan, - u16 addr, u16 len) +static int rtl9300_i2c_config_chan(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan) { - u32 val, mask; + struct rtl9300_i2c_drv_data *drv_data; + int ret; - if (len < 1 || len > 16) - return -EINVAL; + if (i2c->sda_num == chan->sda_num) + return 0; - val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS; - mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK; + ret = regmap_field_write(i2c->fields[F_SCL_FREQ], chan->bus_freq); + if (ret) + return ret; - val |= addr << RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS; - mask |= RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK; + drv_data = (struct rtl9300_i2c_drv_data *)device_get_match_data(i2c->dev); + ret = drv_data->select_scl(i2c, i2c->scl_num); + if (ret) + return ret; - val |= ((len - 1) & 0xf) << RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS; - mask |= RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK; + ret = regmap_field_update_bits(i2c->fields[F_SDA_SEL], BIT(chan->sda_num), + BIT(chan->sda_num)); + if (ret) + return ret; - mask |= RTL9300_I2C_MST_CTRL2_RD_MODE; + ret = regmap_field_write(i2c->fields[F_SDA_OUT_SEL], chan->sda_num); + if (ret) + return ret; - return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val); + i2c->sda_num = chan->sda_num; + return 0; } -static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len) +static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, u8 len) { u32 vals[4] = {}; int i, ret; @@ -124,8 +165,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len) if (len > 16) return -EIO; - ret = regmap_bulk_read(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, - vals, ARRAY_SIZE(vals)); + ret = regmap_bulk_read(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals)); if (ret) return ret; @@ -137,7 +177,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len) return 0; } -static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len) +static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, u8 len) { u32 vals[4] = {}; int i; @@ -152,56 +192,94 @@ static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len) vals[reg] |= buf[i] << shift; } - return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, - vals, ARRAY_SIZE(vals)); + return regmap_bulk_write(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals)); } static int rtl9300_i2c_writel(struct rtl9300_i2c *i2c, u32 data) { - return regmap_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, data); + return regmap_write(i2c->regmap, i2c->data_reg, data); } -static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write, - int size, union i2c_smbus_data *data, int len) +static int rtl9300_i2c_prepare_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer) { - u32 val, mask; int ret; - val = read_write == I2C_SMBUS_WRITE ? 
RTL9300_I2C_MST_CTRL1_RWOP : 0; - mask = RTL9300_I2C_MST_CTRL1_RWOP; + if (xfer->data_len < 1 || xfer->data_len > 16) + return -EINVAL; + + ret = regmap_field_write(i2c->fields[F_DEV_ADDR], xfer->dev_addr); + if (ret) + return ret; + + ret = rtl9300_i2c_reg_addr_set(i2c, xfer->reg_addr, xfer->reg_addr_len); + if (ret) + return ret; + + ret = regmap_field_write(i2c->fields[F_RWOP], xfer->write); + if (ret) + return ret; + + ret = regmap_field_write(i2c->fields[F_DATA_WIDTH], (xfer->data_len - 1) & 0xf); + if (ret) + return ret; - val |= RTL9300_I2C_MST_CTRL1_I2C_TRIG; - mask |= RTL9300_I2C_MST_CTRL1_I2C_TRIG; + if (xfer->write) { + switch (xfer->type) { + case RTL9300_I2C_XFER_BYTE: + ret = rtl9300_i2c_writel(i2c, *xfer->data); + break; + case RTL9300_I2C_XFER_WORD: + ret = rtl9300_i2c_writel(i2c, get_unaligned((const u16 *)xfer->data)); + break; + default: + ret = rtl9300_i2c_write(i2c, xfer->data, xfer->data_len); + break; + } + } - ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); + return ret; +} + +static int rtl9300_i2c_do_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer) +{ + u32 val; + int ret; + + ret = regmap_field_write(i2c->fields[F_I2C_TRIG], 1); if (ret) return ret; - ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, - val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 100000); + ret = regmap_field_read_poll_timeout(i2c->fields[F_I2C_TRIG], val, !val, 100, 100000); if (ret) return ret; - if (val & RTL9300_I2C_MST_CTRL1_I2C_FAIL) + ret = regmap_field_read(i2c->fields[F_I2C_FAIL], &val); + if (ret) + return ret; + if (val) return -EIO; - if (read_write == I2C_SMBUS_READ) { - if (size == I2C_SMBUS_BYTE || size == I2C_SMBUS_BYTE_DATA) { - ret = regmap_read(i2c->regmap, - i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val); + if (!xfer->write) { + switch (xfer->type) { + case RTL9300_I2C_XFER_BYTE: + ret = regmap_read(i2c->regmap, i2c->data_reg, &val); if (ret) return ret; - data->byte = val & 0xff; - } else if (size == I2C_SMBUS_WORD_DATA) { - ret = regmap_read(i2c->regmap, - i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val); + + *xfer->data = val & 0xff; + break; + case RTL9300_I2C_XFER_WORD: + ret = regmap_read(i2c->regmap, i2c->data_reg, &val); if (ret) return ret; - data->word = val & 0xffff; - } else { - ret = rtl9300_i2c_read(i2c, &data->block[0], len); + + put_unaligned(val & 0xffff, (u16*)xfer->data); + break; + default: + ret = rtl9300_i2c_read(i2c, xfer->data, xfer->data_len); if (ret) return ret; + break; } } @@ -214,100 +292,68 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s { struct rtl9300_i2c_chan *chan = i2c_get_adapdata(adap); struct rtl9300_i2c *i2c = chan->i2c; - int len = 0, ret; + struct rtl9300_i2c_xfer xfer = {0}; + int ret; - mutex_lock(&i2c->lock); - if (chan->sda_pin != i2c->sda_pin) { - ret = rtl9300_i2c_config_io(i2c, chan->sda_pin); - if (ret) - goto out_unlock; - i2c->sda_pin = chan->sda_pin; - } + if (addr > 0x7f) + return -EINVAL; + + guard(rtl9300_i2c)(i2c); + + ret = rtl9300_i2c_config_chan(i2c, chan); + if (ret) + return ret; + + xfer.dev_addr = addr & 0x7f; + xfer.write = (read_write == I2C_SMBUS_WRITE); + xfer.reg_addr = command; + xfer.reg_addr_len = 1; switch (size) { case I2C_SMBUS_BYTE: - if (read_write == I2C_SMBUS_WRITE) { - ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0); - if (ret) - goto out_unlock; - ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); - if (ret) - goto out_unlock; - } else { - ret = 
rtl9300_i2c_config_xfer(i2c, chan, addr, 1); - if (ret) - goto out_unlock; - ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0); - if (ret) - goto out_unlock; - } + xfer.data = (read_write == I2C_SMBUS_READ) ? &data->byte : &command; + xfer.data_len = 1; + xfer.reg_addr = 0; + xfer.reg_addr_len = 0; + xfer.type = RTL9300_I2C_XFER_BYTE; break; - case I2C_SMBUS_BYTE_DATA: - ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); - if (ret) - goto out_unlock; - ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1); - if (ret) - goto out_unlock; - if (read_write == I2C_SMBUS_WRITE) { - ret = rtl9300_i2c_writel(i2c, data->byte); - if (ret) - goto out_unlock; - } + xfer.data = &data->byte; + xfer.data_len = 1; + xfer.type = RTL9300_I2C_XFER_BYTE; break; - case I2C_SMBUS_WORD_DATA: - ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); - if (ret) - goto out_unlock; - ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 2); - if (ret) - goto out_unlock; - if (read_write == I2C_SMBUS_WRITE) { - ret = rtl9300_i2c_writel(i2c, data->word); - if (ret) - goto out_unlock; - } + xfer.data = (u8 *)&data->word; + xfer.data_len = 2; + xfer.type = RTL9300_I2C_XFER_WORD; break; - case I2C_SMBUS_BLOCK_DATA: - ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); - if (ret) - goto out_unlock; - if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) { - ret = -EINVAL; - goto out_unlock; - } - ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0] + 1); - if (ret) - goto out_unlock; - if (read_write == I2C_SMBUS_WRITE) { - ret = rtl9300_i2c_write(i2c, &data->block[0], data->block[0] + 1); - if (ret) - goto out_unlock; - } - len = data->block[0] + 1; + xfer.data = &data->block[0]; + xfer.data_len = data->block[0] + 1; + xfer.type = RTL9300_I2C_XFER_BLOCK; + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + xfer.data = &data->block[1]; + xfer.data_len = data->block[0]; + xfer.type = RTL9300_I2C_XFER_BLOCK; break; - default: dev_err(&adap->dev, "Unsupported transaction %d\n", size); - ret = -EOPNOTSUPP; - goto out_unlock; + return -EOPNOTSUPP; } - ret = rtl9300_i2c_execute_xfer(i2c, read_write, size, data, len); - -out_unlock: - mutex_unlock(&i2c->lock); + ret = rtl9300_i2c_prepare_xfer(i2c, &xfer); + if (ret) + return ret; - return ret; + return rtl9300_i2c_do_xfer(i2c, &xfer); } static u32 rtl9300_i2c_func(struct i2c_adapter *a) { return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; + I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK; } static const struct i2c_algorithm rtl9300_i2c_algo = { @@ -325,9 +371,11 @@ static int rtl9300_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rtl9300_i2c *i2c; - u32 clock_freq, sda_pin; - int ret, i = 0; struct fwnode_handle *child; + struct rtl9300_i2c_drv_data *drv_data; + struct reg_field fields[F_NUM_FIELDS]; + u32 clock_freq, scl_num, sda_num; + int ret, i = 0; i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) @@ -344,16 +392,34 @@ static int rtl9300_i2c_probe(struct platform_device *pdev) if (ret) return ret; + ret = device_property_read_u32(dev, "realtek,scl", &scl_num); + if (ret || scl_num != 1) + scl_num = 0; + i2c->scl_num = (u8)scl_num; + platform_set_drvdata(pdev, i2c); - if (device_get_child_node_count(dev) > RTL9300_I2C_MUX_NCHAN) + drv_data = (struct rtl9300_i2c_drv_data *)device_get_match_data(i2c->dev); + if (device_get_child_node_count(dev) > drv_data->max_nchan) return dev_err_probe(dev, -EINVAL, "Too many channels\n"); + i2c->data_reg = i2c->reg_base + 
drv_data->data_reg; + for (i = 0; i < F_NUM_FIELDS; i++) { + fields[i] = drv_data->field_desc[i].field; + if (drv_data->field_desc[i].scope == REG_SCOPE_MASTER) + fields[i].reg += i2c->reg_base; + } + ret = devm_regmap_field_bulk_alloc(dev, i2c->regmap, i2c->fields, + fields, F_NUM_FIELDS); + if (ret) + return ret; + + i = 0; device_for_each_child_node(dev, child) { struct rtl9300_i2c_chan *chan = &i2c->chans[i]; struct i2c_adapter *adap = &chan->adap; - ret = fwnode_property_read_u32(child, "reg", &sda_pin); + ret = fwnode_property_read_u32(child, "reg", &sda_num); if (ret) return ret; @@ -365,17 +431,16 @@ static int rtl9300_i2c_probe(struct platform_device *pdev) case I2C_MAX_STANDARD_MODE_FREQ: chan->bus_freq = RTL9300_I2C_STD_FREQ; break; - case I2C_MAX_FAST_MODE_FREQ: chan->bus_freq = RTL9300_I2C_FAST_FREQ; break; default: dev_warn(i2c->dev, "SDA%d clock-frequency %d not supported using default\n", - sda_pin, clock_freq); + sda_num, clock_freq); break; } - chan->sda_pin = sda_pin; + chan->sda_num = sda_num; chan->i2c = i2c; adap = &i2c->chans[i].adap; adap->owner = THIS_MODULE; @@ -385,23 +450,77 @@ static int rtl9300_i2c_probe(struct platform_device *pdev) adap->dev.parent = dev; i2c_set_adapdata(adap, chan); adap->dev.of_node = to_of_node(child); - snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_pin); + snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_num); i++; ret = devm_i2c_add_adapter(dev, adap); if (ret) return ret; } - i2c->sda_pin = 0xff; + i2c->sda_num = 0xff; + + /* only use standard read format */ + ret = regmap_field_write(i2c->fields[F_RD_MODE], 0); + if (ret) + return ret; return 0; } +#define GLB_REG_FIELD(reg, msb, lsb) \ + { .field = REG_FIELD(reg, msb, lsb), .scope = REG_SCOPE_GLOBAL } +#define MST_REG_FIELD(reg, msb, lsb) \ + { .field = REG_FIELD(reg, msb, lsb), .scope = REG_SCOPE_MASTER } + +static const struct rtl9300_i2c_drv_data rtl9300_i2c_drv_data = { + .field_desc = { + [F_MEM_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 8, 31), + [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 4, 6), + [F_SCL_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 3, 3), + [F_RWOP] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 2, 2), + [F_I2C_FAIL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 1, 1), + [F_I2C_TRIG] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 0, 0), + [F_RD_MODE] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 15, 15), + [F_DEV_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 8, 14), + [F_DATA_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 4, 7), + [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 2, 3), + [F_SCL_FREQ] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 0, 1), + [F_SDA_SEL] = GLB_REG_FIELD(RTL9300_I2C_MST_GLB_CTRL, 0, 7), + }, + .select_scl = rtl9300_i2c_select_scl, + .data_reg = RTL9300_I2C_MST_DATA_WORD0, + .max_nchan = RTL9300_I2C_MUX_NCHAN, +}; + +static const struct rtl9300_i2c_drv_data rtl9310_i2c_drv_data = { + .field_desc = { + [F_SCL_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 12, 13), + [F_SDA_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 0, 11), + [F_SCL_FREQ] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 30, 31), + [F_DEV_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 11, 17), + [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 18, 21), + [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 9, 10), + [F_DATA_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 5, 8), + [F_RD_MODE] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 4, 4), + [F_RWOP] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 2, 2), + [F_I2C_FAIL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 
1, 1), + [F_I2C_TRIG] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 0, 0), + [F_MEM_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_MEMADDR_CTRL, 0, 23), + }, + .select_scl = rtl9310_i2c_select_scl, + .data_reg = RTL9310_I2C_MST_DATA_CTRL, + .max_nchan = RTL9310_I2C_MUX_NCHAN, +}; + static const struct of_device_id i2c_rtl9300_dt_ids[] = { - { .compatible = "realtek,rtl9301-i2c" }, - { .compatible = "realtek,rtl9302b-i2c" }, - { .compatible = "realtek,rtl9302c-i2c" }, - { .compatible = "realtek,rtl9303-i2c" }, + { .compatible = "realtek,rtl9301-i2c", .data = (void *) &rtl9300_i2c_drv_data }, + { .compatible = "realtek,rtl9302b-i2c", .data = (void *) &rtl9300_i2c_drv_data }, + { .compatible = "realtek,rtl9302c-i2c", .data = (void *) &rtl9300_i2c_drv_data }, + { .compatible = "realtek,rtl9303-i2c", .data = (void *) &rtl9300_i2c_drv_data }, + { .compatible = "realtek,rtl9310-i2c", .data = (void *) &rtl9310_i2c_drv_data }, + { .compatible = "realtek,rtl9311-i2c", .data = (void *) &rtl9310_i2c_drv_data }, + { .compatible = "realtek,rtl9312-i2c", .data = (void *) &rtl9310_i2c_drv_data }, + { .compatible = "realtek,rtl9313-i2c", .data = (void *) &rtl9310_i2c_drv_data }, {} }; MODULE_DEVICE_TABLE(of, i2c_rtl9300_dt_ids); diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c index b0e9c0b62429..238714850673 100644 --- a/drivers/i2c/busses/i2c-rzv2m.c +++ b/drivers/i2c/busses/i2c-rzv2m.c @@ -372,7 +372,6 @@ static int rzv2m_i2c_xfer(struct i2c_adapter *adap, ret = num; out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 26ec34b19ad5..1b490525d8dd 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -302,7 +302,6 @@ static int sprd_i2c_xfer(struct i2c_adapter *i2c_adap, ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im++], 1); err_msg: - pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return ret < 0 ? ret : im; @@ -559,7 +558,6 @@ static int sprd_i2c_probe(struct platform_device *pdev) goto err_rpm_put; } - pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return 0; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index e6815f6cae78..dc69ed934ec8 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -1761,7 +1761,6 @@ static int stm32f7_i2c_xfer_core(struct i2c_adapter *i2c_adap, } pm_free: - pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return (ret < 0) ? 
ret : num; @@ -1870,7 +1869,6 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, } pm_free: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; } @@ -1977,7 +1975,6 @@ pm_free: if (!stm32f7_i2c_is_slave_registered(i2c_dev)) stm32f7_i2c_enable_wakeup(i2c_dev, false); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2015,7 +2012,6 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave) stm32f7_i2c_enable_wakeup(i2c_dev, false); } - pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return 0; @@ -2328,7 +2324,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr); - pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return 0; diff --git a/drivers/i2c/busses/i2c-usbio.c b/drivers/i2c/busses/i2c-usbio.c index d42f9ab6e9a5..e7799abf6787 100644 --- a/drivers/i2c/busses/i2c-usbio.c +++ b/drivers/i2c/busses/i2c-usbio.c @@ -27,6 +27,7 @@ static const struct acpi_device_id usbio_i2c_acpi_hids[] = { { "INTC1008" }, /* MTL */ { "INTC10B3" }, /* ARL */ { "INTC10B6" }, /* LNL */ + { "INTC10D2" }, /* MTL-CVF */ { "INTC10E3" }, /* PTL */ { } }; diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 607026c921d6..28015d77599d 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -1349,7 +1349,6 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) mutex_unlock(&i2c->lock); out: - pm_runtime_mark_last_busy(i2c->dev); pm_runtime_put_autosuspend(i2c->dev); return err; } diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c index 338800321f8b..4df8ad092df3 100644 --- a/drivers/i2c/i2c-boardinfo.c +++ b/drivers/i2c/i2c-boardinfo.c @@ -22,7 +22,7 @@ EXPORT_SYMBOL_GPL(__i2c_board_lock); LIST_HEAD(__i2c_board_list); EXPORT_SYMBOL_GPL(__i2c_board_list); -int __i2c_first_dynamic_bus_num __ro_after_init; +int __i2c_first_dynamic_bus_num; EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); @@ -48,7 +48,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); * The board info passed can safely be __initdata, but be careful of embedded * pointers (for platform_data, functions, etc) since that won't be copied. 
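A pattern repeated through the i2c bus-driver hunks above is deleting pm_runtime_mark_last_busy() immediately before pm_runtime_put_autosuspend() (or pm_request_autosuspend() in the resume paths). This reads as the tree-wide cleanup that became possible once the runtime-PM autosuspend helpers started updating the last-busy timestamp themselves; a before/after sketch against a generic struct device *dev:

/* old idiom: refresh the autosuspend timestamp by hand, then drop the ref */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);

/* new idiom: the helper marks last-busy internally, so the explicit
 * pm_runtime_mark_last_busy() call is redundant */
pm_runtime_put_autosuspend(dev);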
*/ -int __init i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len) +int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len) { int status; diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c index 5584e0f82cce..bee59c8c4c93 100644 --- a/drivers/irqchip/irq-aspeed-scu-ic.c +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -215,8 +215,8 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic, int irq, rc = 0; scu_ic->base = of_iomap(node, 0); - if (IS_ERR(scu_ic->base)) { - rc = PTR_ERR(scu_ic->base); + if (!scu_ic->base) { + rc = -ENOMEM; goto err; } diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 559fda8fb3a8..cbd7697bc148 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -254,7 +254,8 @@ static int plic_irq_suspend(void) priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; - for (i = 0; i < priv->nr_irqs; i++) { + /* irq ID 0 is reserved */ + for (i = 1; i < priv->nr_irqs; i++) { __assign_bit(i, priv->prio_save, readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)); } @@ -285,7 +286,8 @@ static void plic_irq_resume(void) priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; - for (i = 0; i < priv->nr_irqs; i++) { + /* irq ID 0 is reserved */ + for (i = 1; i < priv->nr_irqs; i++) { index = BIT_WORD(i); writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0, priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID); diff --git a/drivers/mfd/ls2k-bmc-core.c b/drivers/mfd/ls2k-bmc-core.c index e162b3c7c9f8..69387dad6661 100644 --- a/drivers/mfd/ls2k-bmc-core.c +++ b/drivers/mfd/ls2k-bmc-core.c @@ -469,7 +469,7 @@ static int ls2k_bmc_probe(struct pci_dev *dev, const struct pci_device_id *id) return ret; ddata = devm_kzalloc(&dev->dev, sizeof(*ddata), GFP_KERNEL); - if (IS_ERR(ddata)) { + if (!ddata) { ret = -ENOMEM; goto disable_pci; } @@ -495,9 +495,13 @@ static int ls2k_bmc_probe(struct pci_dev *dev, const struct pci_device_id *id) goto disable_pci; } - return devm_mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO, - ls2k_bmc_cells, ARRAY_SIZE(ls2k_bmc_cells), - &dev->resource[0], 0, NULL); + ret = devm_mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO, + ls2k_bmc_cells, ARRAY_SIZE(ls2k_bmc_cells), + &dev->resource[0], 0, NULL); + if (ret) + goto disable_pci; + + return 0; disable_pci: pci_disable_device(dev); diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c index 6c24426104ba..e1f5e9abb301 100644 --- a/drivers/misc/lkdtm/perms.c +++ b/drivers/misc/lkdtm/perms.c @@ -9,6 +9,7 @@ #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/uaccess.h> +#include <linux/objtool.h> #include <asm/cacheflush.h> #include <asm/sections.h> @@ -86,6 +87,10 @@ static noinline __nocfi void execute_location(void *dst, bool write) func(); pr_err("FAIL: func returned\n"); } +/* + * Explicitly doing the wrong thing for testing. + */ +ANNOTATE_NOCFI_SYM(execute_location); static void execute_user_location(void *dst) { diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c index 36f7379b8e2d..f6b821fc274c 100644 --- a/drivers/misc/ocxl/afu_irq.c +++ b/drivers/misc/ocxl/afu_irq.c @@ -203,7 +203,7 @@ u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id) mutex_lock(&ctx->irq_lock); irq = idr_find(&ctx->irq_idr, irq_id); if (irq) { - xd = irq_get_handler_data(irq->virq); + xd = irq_get_chip_data(irq->virq); addr = xd ? 
xd->trig_page : 0; } mutex_unlock(&ctx->irq_lock); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 9399bf6c766a..c0ffe0817fd4 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -79,48 +79,6 @@ MODULE_ALIAS("mmc:block"); #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) -/** - * struct rpmb_frame - rpmb frame as defined by eMMC 5.1 (JESD84-B51) - * - * @stuff : stuff bytes - * @key_mac : The authentication key or the message authentication - * code (MAC) depending on the request/response type. - * The MAC will be delivered in the last (or the only) - * block of data. - * @data : Data to be written or read by signed access. - * @nonce : Random number generated by the host for the requests - * and copied to the response by the RPMB engine. - * @write_counter: Counter value for the total amount of the successful - * authenticated data write requests made by the host. - * @addr : Address of the data to be programmed to or read - * from the RPMB. Address is the serial number of - * the accessed block (half sector 256B). - * @block_count : Number of blocks (half sectors, 256B) requested to be - * read/programmed. - * @result : Includes information about the status of the write counter - * (valid, expired) and result of the access made to the RPMB. - * @req_resp : Defines the type of request and response to/from the memory. - * - * The stuff bytes and big-endian properties are modeled to fit to the spec. - */ -struct rpmb_frame { - u8 stuff[196]; - u8 key_mac[32]; - u8 data[256]; - u8 nonce[16]; - __be32 write_counter; - __be16 addr; - __be16 block_count; - __be16 result; - __be16 req_resp; -} __packed; - -#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */ -#define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */ -#define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */ -#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */ -#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */ - #define RPMB_FRAME_SIZE sizeof(struct rpmb_frame) #define CHECK_SIZE_NEQ(val) ((val) != sizeof(struct rpmb_frame)) #define CHECK_SIZE_ALIGNED(val) IS_ALIGNED((val), sizeof(struct rpmb_frame)) diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 63ceed89b62e..1a163596ddf5 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -197,13 +197,22 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev) { - struct pci_dev *pdev = NULL; + struct pci_dev *pdev = ndev->ntb.pdev; struct pci_dev *pci_swds = NULL; struct pci_dev *pci_swus = NULL; u32 stat; int rc; if (ndev->ntb.topo == NTB_TOPO_SEC) { + if (ndev->dev_data->is_endpoint) { + rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat); + if (rc) + return rc; + + ndev->lnk_sta = stat; + return 0; + } + /* Locate the pointer to Downstream Switch for this device */ pci_swds = pci_upstream_bridge(ndev->ntb.pdev); if (pci_swds) { @@ -1311,6 +1320,11 @@ static const struct ntb_dev_data dev_data[] = { .mw_count = 2, .mw_idx = 2, }, + { /* for device 0x17d7 */ + .mw_count = 2, + .mw_idx = 2, + .is_endpoint = true, + }, }; static const struct pci_device_id amd_ntb_pci_tbl[] = { @@ -1319,6 +1333,8 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = { { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] }, 
{ PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x17d4), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x17d7), (kernel_ulong_t)&dev_data[2] }, { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] }, { 0, } }; diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h index 5f337b1572a0..e8c3165fa38b 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.h +++ b/drivers/ntb/hw/amd/ntb_hw_amd.h @@ -168,6 +168,7 @@ enum { struct ntb_dev_data { const unsigned char mw_count; const unsigned int mw_idx; + const bool is_endpoint; }; struct amd_ntb_dev; diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c index 00f0e78f685b..d3ecf25a5162 100644 --- a/drivers/ntb/hw/epf/ntb_hw_epf.c +++ b/drivers/ntb/hw/epf/ntb_hw_epf.c @@ -49,6 +49,7 @@ #define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */ enum pci_barno { + NO_BAR = -1, BAR_0, BAR_1, BAR_2, @@ -57,16 +58,26 @@ enum pci_barno { BAR_5, }; +enum epf_ntb_bar { + BAR_CONFIG, + BAR_PEER_SPAD, + BAR_DB, + BAR_MW1, + BAR_MW2, + BAR_MW3, + BAR_MW4, + NTB_BAR_NUM, +}; + +#define NTB_EPF_MAX_MW_COUNT (NTB_BAR_NUM - BAR_MW1) + struct ntb_epf_dev { struct ntb_dev ntb; struct device *dev; /* Mutex to protect providing commands to NTB EPF */ struct mutex cmd_lock; - enum pci_barno ctrl_reg_bar; - enum pci_barno peer_spad_reg_bar; - enum pci_barno db_reg_bar; - enum pci_barno mw_bar; + const enum pci_barno *barno_map; unsigned int mw_count; unsigned int spad_count; @@ -85,17 +96,6 @@ struct ntb_epf_dev { #define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb) -struct ntb_epf_data { - /* BAR that contains both control region and self spad region */ - enum pci_barno ctrl_reg_bar; - /* BAR that contains peer spad region */ - enum pci_barno peer_spad_reg_bar; - /* BAR that contains Doorbell region and Memory window '1' */ - enum pci_barno db_reg_bar; - /* BAR that contains memory windows*/ - enum pci_barno mw_bar; -}; - static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command, u32 argument) { @@ -144,7 +144,7 @@ static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx) return -EINVAL; } - return idx + 2; + return ndev->barno_map[BAR_MW1 + idx]; } static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx) @@ -413,7 +413,9 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, return -EINVAL; } - bar = idx + ndev->mw_bar; + bar = ntb_epf_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; mw_size = pci_resource_len(ntb->pdev, bar); @@ -455,7 +457,9 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx, if (idx == 0) offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET); - bar = idx + ndev->mw_bar; + bar = ntb_epf_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; if (base) *base = pci_resource_start(ndev->ntb.pdev, bar) + offset; @@ -560,6 +564,11 @@ static int ntb_epf_init_dev(struct ntb_epf_dev *ndev) ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT); ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT); + if (ndev->mw_count > NTB_EPF_MAX_MW_COUNT) { + dev_err(dev, "Unsupported MW count: %u\n", ndev->mw_count); + return -EINVAL; + } + return 0; } @@ -596,14 +605,15 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev, dev_warn(&pdev->dev, "Cannot DMA highmem\n"); } - ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0); + ndev->ctrl_reg = pci_iomap(pdev, ndev->barno_map[BAR_CONFIG], 0); if (!ndev->ctrl_reg) { ret = -EIO; goto err_pci_regions; } - if (ndev->peer_spad_reg_bar) { - ndev->peer_spad_reg = 
pci_iomap(pdev, ndev->peer_spad_reg_bar, 0); + if (ndev->barno_map[BAR_PEER_SPAD] != ndev->barno_map[BAR_CONFIG]) { + ndev->peer_spad_reg = pci_iomap(pdev, + ndev->barno_map[BAR_PEER_SPAD], 0); if (!ndev->peer_spad_reg) { ret = -EIO; goto err_pci_regions; @@ -614,7 +624,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev, ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz; } - ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0); + ndev->db_reg = pci_iomap(pdev, ndev->barno_map[BAR_DB], 0); if (!ndev->db_reg) { ret = -EIO; goto err_pci_regions; @@ -659,12 +669,7 @@ static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev) static int ntb_epf_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - enum pci_barno peer_spad_reg_bar = BAR_1; - enum pci_barno ctrl_reg_bar = BAR_0; - enum pci_barno db_reg_bar = BAR_2; - enum pci_barno mw_bar = BAR_2; struct device *dev = &pdev->dev; - struct ntb_epf_data *data; struct ntb_epf_dev *ndev; int ret; @@ -675,18 +680,10 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev, if (!ndev) return -ENOMEM; - data = (struct ntb_epf_data *)id->driver_data; - if (data) { - peer_spad_reg_bar = data->peer_spad_reg_bar; - ctrl_reg_bar = data->ctrl_reg_bar; - db_reg_bar = data->db_reg_bar; - mw_bar = data->mw_bar; - } + ndev->barno_map = (const enum pci_barno *)id->driver_data; + if (!ndev->barno_map) + return -EINVAL; - ndev->peer_spad_reg_bar = peer_spad_reg_bar; - ndev->ctrl_reg_bar = ctrl_reg_bar; - ndev->db_reg_bar = db_reg_bar; - ndev->mw_bar = mw_bar; ndev->dev = dev; ntb_epf_init_struct(ndev, pdev); @@ -730,30 +727,51 @@ static void ntb_epf_pci_remove(struct pci_dev *pdev) ntb_epf_deinit_pci(ndev); } -static const struct ntb_epf_data j721e_data = { - .ctrl_reg_bar = BAR_0, - .peer_spad_reg_bar = BAR_1, - .db_reg_bar = BAR_2, - .mw_bar = BAR_2, +static const enum pci_barno j721e_map[NTB_BAR_NUM] = { + [BAR_CONFIG] = BAR_0, + [BAR_PEER_SPAD] = BAR_1, + [BAR_DB] = BAR_2, + [BAR_MW1] = BAR_2, + [BAR_MW2] = BAR_3, + [BAR_MW3] = BAR_4, + [BAR_MW4] = BAR_5 }; -static const struct ntb_epf_data mx8_data = { - .ctrl_reg_bar = BAR_0, - .peer_spad_reg_bar = BAR_0, - .db_reg_bar = BAR_2, - .mw_bar = BAR_4, +static const enum pci_barno mx8_map[NTB_BAR_NUM] = { + [BAR_CONFIG] = BAR_0, + [BAR_PEER_SPAD] = BAR_0, + [BAR_DB] = BAR_2, + [BAR_MW1] = BAR_4, + [BAR_MW2] = BAR_5, + [BAR_MW3] = NO_BAR, + [BAR_MW4] = NO_BAR +}; + +static const enum pci_barno rcar_barno[NTB_BAR_NUM] = { + [BAR_CONFIG] = BAR_0, + [BAR_PEER_SPAD] = BAR_0, + [BAR_DB] = BAR_4, + [BAR_MW1] = BAR_2, + [BAR_MW2] = NO_BAR, + [BAR_MW3] = NO_BAR, + [BAR_MW4] = NO_BAR, }; static const struct pci_device_id ntb_epf_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E), .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, - .driver_data = (kernel_ulong_t)&j721e_data, + .driver_data = (kernel_ulong_t)j721e_map, }, { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809), .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, - .driver_data = (kernel_ulong_t)&mx8_data, + .driver_data = (kernel_ulong_t)mx8_map, + }, + { + PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0030), + .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, + .driver_data = (kernel_ulong_t)rcar_barno, }, { }, }; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 4f775c3e218f..eb875e3db2e3 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -59,6 +59,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> +#include <linux/mutex.h> 
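The ntb_transport change starting here (note the added <linux/mutex.h> include) serializes the link-up and link-cleanup workers with guard(mutex)(&nt->link_event_lock), the scope-based locking from <linux/cleanup.h> — the same facility behind the DEFINE_GUARD() introduced in the rtl9300 i2c rework above. A small sketch of the semantics, with hypothetical foo_* names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static void foo_link_work(struct foo_ctx *foo)
{
	guard(mutex)(&foo->link_event_lock);

	if (!foo->link_is_up)
		return;		/* mutex dropped automatically here... */

	foo_handle_link_up(foo);
}				/* ...and on the normal exit path */

The point of such conversions is that every early return unlocks correctly without goto-based cleanup labels.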
#include "linux/ntb.h" #include "linux/ntb_transport.h" @@ -241,6 +242,9 @@ struct ntb_transport_ctx { struct work_struct link_cleanup; struct dentry *debugfs_node_dir; + + /* Make sure workq of link event be executed serially */ + struct mutex link_event_lock; }; enum { @@ -1024,6 +1028,7 @@ static void ntb_transport_link_cleanup_work(struct work_struct *work) struct ntb_transport_ctx *nt = container_of(work, struct ntb_transport_ctx, link_cleanup); + guard(mutex)(&nt->link_event_lock); ntb_transport_link_cleanup(nt); } @@ -1047,6 +1052,8 @@ static void ntb_transport_link_work(struct work_struct *work) u32 val; int rc = 0, i, spad; + guard(mutex)(&nt->link_event_lock); + /* send the local info, in the opposite order of the way we read it */ if (nt->use_msi) { diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 012fcfc79a73..a01178caf15b 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -36,6 +36,7 @@ struct nvme_dhchap_queue_context { u8 status; u8 dhgroup_id; u8 hash_id; + u8 sc_c; size_t hash_len; u8 c1[64]; u8 c2[64]; @@ -154,6 +155,8 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144; data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192; + chap->sc_c = data->sc_c; + return size; } @@ -489,7 +492,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; - memset(buf, 0, sizeof(buf)); + *buf = chap->sc_c; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; @@ -500,6 +503,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, strlen(ctrl->opts->host->nqn)); if (ret) goto out; + memset(buf, 0, sizeof(buf)); ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 3da980dc60d9..543e17aead12 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -182,12 +182,14 @@ void nvme_mpath_start_request(struct request *rq) struct nvme_ns *ns = rq->q->queuedata; struct gendisk *disk = ns->head->disk; - if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) { + if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) && + !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) { atomic_inc(&ns->ctrl->nr_active); nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE; } - if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq)) + if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) || + (nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 1413788ca7d5..9a96df1a511c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1081,6 +1081,9 @@ static void nvme_tcp_write_space(struct sock *sk) queue = sk->sk_user_data; if (likely(queue && sk_stream_is_writeable(sk))) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + /* Ensure pending TLS partial records are retried */ + if (nvme_tcp_queue_tls(queue)) + queue->write_space(sk); queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } read_unlock_bh(&sk->sk_callback_lock); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 788ccb6ab287..65c3c23255b7 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -163,7 +163,7 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph * @out_irq: structure of_phandle_args updated by 
this function * * This function is a low-level interrupt tree walking function. It - * can be used to do a partial walk with synthetized reg and interrupts + * can be used to do a partial walk with synthesized reg and interrupts * properties, for example when resolving PCI interrupts when no device * node exist for the parent. It takes an interrupt specifier structure as * input, walks the tree looking for any interrupt-map properties, translates diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 1af6f52d0708..255e8362f600 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -135,7 +135,7 @@ static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain); * @nb: Notifier block to register * * Register for notification on overlay operations on device tree nodes. The - * reported actions definied by @of_reconfig_change. The notifier callback + * reported actions defined by @of_reconfig_change. The notifier callback * furthermore receives a pointer to the affected device tree node. * * Note that a notifier callback is not supposed to store pointers to a device diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index e3503ec20f6c..388e9ec2cccf 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -4300,6 +4300,7 @@ static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add) unittest(!np, "Child device tree node is not removed\n"); child_dev = device_find_any_child(&pdev->dev); unittest(!child_dev, "Child device is not removed\n"); + put_device(child_dev); } failed: diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 7065a8e5f9b1..f94f5d384362 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -306,6 +306,7 @@ config VGA_ARB bool "VGA Arbitration" if EXPERT default y depends on (PCI && !S390) + select SCREEN_INFO if X86 help Some "legacy" VGA devices implemented on PCI typically have the same hard-decoded addresses as they did on ISA. When multiple PCI devices diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 1eac012a8226..c0e1194a936b 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -255,7 +255,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) u16 flags, mme; u8 cap; - cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX); + cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI); fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); /* Validate that the MSI feature is actually enabled. 
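The PCI_CAP_ID_MSIX to PCI_CAP_ID_MSI correction above matters because the fields this function goes on to read — Message Control's enable bit and multiple-message-enable value — exist only in the MSI capability, so probing for MSI-X would aim those reads at the wrong structure. A host-side illustration of the same layout (a sketch, not the endpoint-side helper the driver actually uses):

u16 flags, mme;
u8 cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);	/* not PCI_CAP_ID_MSIX */

pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &flags);
/* PCI_MSI_FLAGS_QSIZE (bits 6:4) holds log2 of the enabled vector count */
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;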
*/ diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 1bd5bf4a6097..b4b62b9ccc45 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -192,6 +192,12 @@ static void vmd_pci_msi_enable(struct irq_data *data) data->chip->irq_unmask(data); } +static unsigned int vmd_pci_msi_startup(struct irq_data *data) +{ + vmd_pci_msi_enable(data); + return 0; +} + static void vmd_irq_disable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; @@ -210,6 +216,11 @@ static void vmd_pci_msi_disable(struct irq_data *data) vmd_irq_disable(data->parent_data); } +static void vmd_pci_msi_shutdown(struct irq_data *data) +{ + vmd_pci_msi_disable(data); +} + static struct irq_chip vmd_msi_controller = { .name = "VMD-MSI", .irq_compose_msi_msg = vmd_compose_msi_msg, @@ -309,6 +320,8 @@ static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain, if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) return false; + info->chip->irq_startup = vmd_pci_msi_startup; + info->chip->irq_shutdown = vmd_pci_msi_shutdown; info->chip->irq_enable = vmd_pci_msi_enable; info->chip->irq_disable = vmd_pci_msi_disable; return true; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c83e75a0ec12..0ce98e18b5a8 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -538,14 +538,10 @@ static void pci_read_bridge_windows(struct pci_dev *bridge) } if (io) { bridge->io_window = 1; - pci_read_bridge_io(bridge, - pci_resource_n(bridge, PCI_BRIDGE_IO_WINDOW), - true); + pci_read_bridge_io(bridge, &res, true); } - pci_read_bridge_mmio(bridge, - pci_resource_n(bridge, PCI_BRIDGE_MEM_WINDOW), - true); + pci_read_bridge_mmio(bridge, &res, true); /* * DECchip 21050 pass 2 errata: the bridge may miss an address @@ -583,10 +579,7 @@ static void pci_read_bridge_windows(struct pci_dev *bridge) bridge->pref_64_window = 1; } - pci_read_bridge_mmio_pref(bridge, - pci_resource_n(bridge, - PCI_BRIDGE_PREF_MEM_WINDOW), - true); + pci_read_bridge_mmio_pref(bridge, &res, true); } void pci_read_bridge_bases(struct pci_bus *child) diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c index b58f94ee4891..436fa7f4c387 100644 --- a/drivers/pci/vgaarb.c +++ b/drivers/pci/vgaarb.c @@ -556,10 +556,8 @@ EXPORT_SYMBOL(vga_put); static bool vga_is_firmware_default(struct pci_dev *pdev) { -#ifdef CONFIG_SCREEN_INFO - struct screen_info *si = &screen_info; - - return pdev == screen_info_pci_dev(si); +#if defined CONFIG_X86 + return pdev == screen_info_pci_dev(&screen_info); #else return false; #endif diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 4a8dc8d0a4b7..2933c41c77c8 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -406,6 +406,16 @@ config RTC_DRV_MAX77686 This driver can also be built as a module. If so, the module will be called rtc-max77686. +config RTC_DRV_SPACEMIT_P1 + tristate "SpacemiT P1 RTC" + depends on ARCH_SPACEMIT || COMPILE_TEST + select MFD_SPACEMIT_P1 + default ARCH_SPACEMIT + help + Enable support for the RTC function in the SpacemiT P1 PMIC. + This driver can also be built as a module, which will be called + "spacemit-p1-rtc". + config RTC_DRV_NCT3018Y tristate "Nuvoton NCT3018Y" depends on OF @@ -2044,20 +2054,6 @@ config RTC_DRV_RENESAS_RTCA3 This driver can also be built as a module, if so, the module will be called "rtc-rtca3". 
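In the vgaarb hunk above, vga_is_firmware_default() now calls screen_info_pci_dev() directly, which is why the VGA_ARB Kconfig entry earlier gained "select SCREEN_INFO if X86". A short sketch of what the helper provides, assuming firmware populated the global screen_info; foo_is_boot_display is an illustrative name:

#include <linux/pci.h>
#include <linux/screen_info.h>

static bool foo_is_boot_display(struct pci_dev *pdev)
{
	/* screen_info_pci_dev() resolves the PCI device that backs the
	 * firmware framebuffer; it returns NULL when the boot display is
	 * not a PCI device, so the comparison simply fails in that case. */
	return pdev == screen_info_pci_dev(&screen_info);
}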
-comment "HID Sensor RTC drivers" - -config RTC_DRV_HID_SENSOR_TIME - tristate "HID Sensor Time" - depends on USB_HID - depends on HID_SENSOR_HUB && IIO - select HID_SENSOR_IIO_COMMON - help - Say yes here to build support for the HID Sensors of type Time. - This drivers makes such sensors available as RTCs. - - If this driver is compiled as a module, it will be named - rtc-hid-sensor-time. - config RTC_DRV_GOLDFISH tristate "Goldfish Real Time Clock" depends on HAS_IOMEM @@ -2132,4 +2128,18 @@ config RTC_DRV_S32G This RTC module can be used as a wakeup source. Please note that it is not battery-powered. +comment "HID Sensor RTC drivers" + +config RTC_DRV_HID_SENSOR_TIME + tristate "HID Sensor Time" + depends on USB_HID + depends on HID_SENSOR_HUB && IIO + select HID_SENSOR_IIO_COMMON + help + Say yes here to build support for the HID Sensors of type Time. + This drivers makes such sensors available as RTCs. + + If this driver is compiled as a module, it will be named + rtc-hid-sensor-time. + endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 610a9ee5fd33..8221bda6e6dc 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -172,6 +172,7 @@ obj-$(CONFIG_RTC_DRV_SD2405AL) += rtc-sd2405al.o obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o +obj-$(CONFIG_RTC_DRV_SPACEMIT_P1) += rtc-spacemit-p1.o obj-$(CONFIG_RTC_DRV_SPEAR) += rtc-spear.o obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index dc741ba29fa3..b8b298efd9a9 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -443,6 +443,29 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) else err = rtc->ops->set_alarm(rtc->dev.parent, alarm); + /* + * Check for potential race described above. If the waiting for next + * second, and the second just ticked since the check above, either + * + * 1) It ticked after the alarm was set, and an alarm irq should be + * generated. + * + * 2) It ticked before the alarm was set, and alarm irq most likely will + * not be generated. + * + * While we cannot easily check for which of these two scenarios we + * are in, we can return -ETIME to signal that the timer has already + * expired, which is true in both cases. 
+ */ + if ((scheduled - now) <= 1) { + err = __rtc_read_time(rtc, &tm); + if (err) + return err; + now = rtc_tm_to_time64(&tm); + if (scheduled <= now) + return -ETIME; + } + trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err); return err; } @@ -594,6 +617,10 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); rtc->uie_rtctimer.period = ktime_set(1, 0); err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); + if (!err && rtc->ops && rtc->ops->alarm_irq_enable) + err = rtc->ops->alarm_irq_enable(rtc->dev.parent, 1); + if (err) + goto out; } else { rtc_timer_remove(rtc, &rtc->uie_rtctimer); } diff --git a/drivers/rtc/rtc-amlogic-a4.c b/drivers/rtc/rtc-amlogic-a4.c index 09d78c2cc691..1928b29c1045 100644 --- a/drivers/rtc/rtc-amlogic-a4.c +++ b/drivers/rtc/rtc-amlogic-a4.c @@ -72,13 +72,6 @@ struct aml_rtc_data { const struct aml_rtc_config *config; }; -static const struct regmap_config aml_rtc_regmap_config = { - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, - .max_register = RTC_REAL_TIME, -}; - static inline u32 gray_to_binary(u32 gray) { u32 bcd = gray; @@ -328,6 +321,13 @@ static int aml_rtc_probe(struct platform_device *pdev) void __iomem *base; int ret = 0; + const struct regmap_config aml_rtc_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = RTC_REAL_TIME, + }; + rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL); if (!rtc) return -ENOMEM; diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c index c170345ac076..8b6b35716f53 100644 --- a/drivers/rtc/rtc-cpcap.c +++ b/drivers/rtc/rtc-cpcap.c @@ -268,6 +268,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev) return err; rtc->alarm_irq = platform_get_irq(pdev, 0); + rtc->alarm_enabled = true; err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL, cpcap_rtc_alarm_irq, IRQF_TRIGGER_NONE | IRQF_ONESHOT, diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index fa8bf82df948..b4f44999ef0f 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c @@ -112,48 +112,6 @@ convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime) return true; } -static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) -{ - efi_time_t eft; - efi_status_t status; - - /* - * As of EFI v1.10, this call always returns an unsupported status - */ - status = efi.get_wakeup_time((efi_bool_t *)&wkalrm->enabled, - (efi_bool_t *)&wkalrm->pending, &eft); - - if (status != EFI_SUCCESS) - return -EINVAL; - - if (!convert_from_efi_time(&eft, &wkalrm->time)) - return -EIO; - - return rtc_valid_tm(&wkalrm->time); -} - -static int efi_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) -{ - efi_time_t eft; - efi_status_t status; - - convert_to_efi_time(&wkalrm->time, &eft); - - /* - * XXX Fixme: - * As of EFI 0.92 with the firmware I have on my - * machine this call does not seem to work quite - * right - * - * As of v1.10, this call always returns an unsupported status - */ - status = efi.set_wakeup_time((efi_bool_t)wkalrm->enabled, &eft); - - dev_warn(dev, "write status is %d\n", (int)status); - - return status == EFI_SUCCESS ? 
0 : -EINVAL; -} - static int efi_read_time(struct device *dev, struct rtc_time *tm) { efi_status_t status; @@ -188,17 +146,13 @@ static int efi_set_time(struct device *dev, struct rtc_time *tm) static int efi_procfs(struct device *dev, struct seq_file *seq) { - efi_time_t eft, alm; + efi_time_t eft; efi_time_cap_t cap; - efi_bool_t enabled, pending; - struct rtc_device *rtc = dev_get_drvdata(dev); memset(&eft, 0, sizeof(eft)); - memset(&alm, 0, sizeof(alm)); memset(&cap, 0, sizeof(cap)); efi.get_time(&eft, &cap); - efi.get_wakeup_time(&enabled, &pending, &alm); seq_printf(seq, "Time\t\t: %u:%u:%u.%09u\n" @@ -214,26 +168,6 @@ static int efi_procfs(struct device *dev, struct seq_file *seq) /* XXX fixme: convert to string? */ seq_printf(seq, "Timezone\t: %u\n", eft.timezone); - if (test_bit(RTC_FEATURE_ALARM, rtc->features)) { - seq_printf(seq, - "Alarm Time\t: %u:%u:%u.%09u\n" - "Alarm Date\t: %u-%u-%u\n" - "Alarm Daylight\t: %u\n" - "Enabled\t\t: %s\n" - "Pending\t\t: %s\n", - alm.hour, alm.minute, alm.second, alm.nanosecond, - alm.year, alm.month, alm.day, - alm.daylight, - enabled == 1 ? "yes" : "no", - pending == 1 ? "yes" : "no"); - - if (alm.timezone == EFI_UNSPECIFIED_TIMEZONE) - seq_puts(seq, "Timezone\t: unspecified\n"); - else - /* XXX fixme: convert to string? */ - seq_printf(seq, "Timezone\t: %u\n", alm.timezone); - } - /* * now prints the capabilities */ @@ -249,8 +183,6 @@ static int efi_procfs(struct device *dev, struct seq_file *seq) static const struct rtc_class_ops efi_rtc_ops = { .read_time = efi_read_time, .set_time = efi_set_time, - .read_alarm = efi_read_alarm, - .set_alarm = efi_set_alarm, .proc = efi_procfs, }; @@ -271,11 +203,7 @@ static int __init efi_rtc_probe(struct platform_device *dev) platform_set_drvdata(dev, rtc); rtc->ops = &efi_rtc_ops; - clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features); - if (efi_rt_services_supported(EFI_RT_SUPPORTED_WAKEUP_SERVICES)) - set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features); - else - clear_bit(RTC_FEATURE_ALARM, rtc->features); + clear_bit(RTC_FEATURE_ALARM, rtc->features); device_init_wakeup(&dev->dev, true); diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c index 9b44839a7402..5fc52dc64213 100644 --- a/drivers/rtc/rtc-isl12022.c +++ b/drivers/rtc/rtc-isl12022.c @@ -413,6 +413,7 @@ static int isl12022_setup_irq(struct device *dev, int irq) if (ret) return ret; + isl12022->irq_enabled = true; ret = devm_request_threaded_irq(dev, irq, NULL, isl12022_rtc_interrupt, IRQF_SHARED | IRQF_ONESHOT, diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c index 47e9ebf58ffc..21eceb9e2e13 100644 --- a/drivers/rtc/rtc-meson.c +++ b/drivers/rtc/rtc-meson.c @@ -72,7 +72,6 @@ static const struct regmap_config meson_rtc_peripheral_regmap_config = { .val_bits = 32, .reg_stride = 4, .max_register = RTC_REG4, - .fast_io = true, }; /* RTC front-end serialiser controls */ diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c index 9f8b5d4a8f6b..184c6d142801 100644 --- a/drivers/rtc/rtc-optee.c +++ b/drivers/rtc/rtc-optee.c @@ -5,19 +5,104 @@ #include <linux/device.h> #include <linux/kernel.h> +#include <linux/kthread.h> #include <linux/module.h> #include <linux/rtc.h> #include <linux/tee_drv.h> -#define RTC_INFO_VERSION 0x1 +#define RTC_INFO_VERSION 0x1 -#define TA_CMD_RTC_GET_INFO 0x0 -#define TA_CMD_RTC_GET_TIME 0x1 -#define TA_CMD_RTC_SET_TIME 0x2 -#define TA_CMD_RTC_GET_OFFSET 0x3 -#define TA_CMD_RTC_SET_OFFSET 0x4 +#define TA_RTC_FEATURE_CORRECTION BIT(0) +#define TA_RTC_FEATURE_ALARM BIT(1) 
+#define TA_RTC_FEATURE_WAKEUP_ALARM BIT(2) -#define TA_RTC_FEATURE_CORRECTION BIT(0) +enum rtc_optee_pta_cmd { + /* PTA_CMD_RTC_GET_INFO - Get RTC information + * + * [out] memref[0] RTC buffer memory reference containing a struct pta_rtc_info + */ + PTA_CMD_RTC_GET_INFO = 0x0, + + /* + * PTA_CMD_RTC_GET_TIME - Get time from RTC + * + * [out] memref[0] RTC buffer memory reference containing a struct pta_rtc_time + */ + PTA_CMD_RTC_GET_TIME = 0x1, + + /* + * PTA_CMD_RTC_SET_TIME - Set time from RTC + * + * [in] memref[0] RTC buffer memory reference containing a struct pta_rtc_time to be + * used as RTC time + */ + PTA_CMD_RTC_SET_TIME = 0x2, + + /* + * PTA_CMD_RTC_GET_OFFSET - Get RTC offset + * + * [out] value[0].a RTC offset (signed 32bit value) + */ + PTA_CMD_RTC_GET_OFFSET = 0x3, + + /* + * PTA_CMD_RTC_SET_OFFSET - Set RTC offset + * + * [in] value[0].a RTC offset to be set (signed 32bit value) + */ + PTA_CMD_RTC_SET_OFFSET = 0x4, + + /* + * PTA_CMD_RTC_READ_ALARM - Read RTC alarm + * + * [out] memref[0] RTC buffer memory reference containing a struct pta_rtc_alarm + */ + PTA_CMD_RTC_READ_ALARM = 0x5, + + /* + * PTA_CMD_RTC_SET_ALARM - Set RTC alarm + * + * [in] memref[0] RTC buffer memory reference containing a struct pta_rtc_alarm to be + * used as RTC alarm + */ + PTA_CMD_RTC_SET_ALARM = 0x6, + + /* + * PTA_CMD_RTC_ENABLE_ALARM - Enable Alarm + * + * [in] value[0].a RTC IRQ flag (uint32_t), 0 to disable the alarm, 1 to enable + */ + PTA_CMD_RTC_ENABLE_ALARM = 0x7, + + /* + * PTA_CMD_RTC_WAIT_ALARM - Get alarm event + * + * [out] value[0].a RTC wait alarm return status (uint32_t): + * - 0: No alarm event + * - 1: Alarm event occurred + * - 2: Alarm event canceled + */ + PTA_CMD_RTC_WAIT_ALARM = 0x8, + + /* + * PTA_CMD_RTC_CANCEL_WAIT - Cancel wait for alarm event + */ + PTA_CMD_RTC_CANCEL_WAIT = 0x9, + + /* + * PTA_CMD_RTC_SET_WAKE_ALARM_STATUS - Set RTC wake alarm status flag + * + * [in] value[0].a RTC IRQ wake alarm flag (uint32_t), 0 to disable the wake up + * capability, 1 to enable. + */ + PTA_CMD_RTC_SET_WAKE_ALARM_STATUS = 0xA, +}; + +enum rtc_wait_alarm_status { + WAIT_ALARM_RESET = 0x0, + WAIT_ALARM_ALARM_OCCURRED = 0x1, + WAIT_ALARM_CANCELED = 0x2, +}; struct optee_rtc_time { u32 tm_sec; @@ -29,6 +114,12 @@ struct optee_rtc_time { u32 tm_wday; }; +struct optee_rtc_alarm { + u8 enabled; + u8 pending; + struct optee_rtc_time time; +}; + struct optee_rtc_info { u64 version; u64 features; @@ -41,15 +132,21 @@ struct optee_rtc_info { * @dev: OP-TEE based RTC device. * @ctx: OP-TEE context handler. * @session_id: RTC TA session identifier. + * @session2_id: RTC wait alarm session identifier. * @shm: Memory pool shared with RTC device. * @features: Bitfield of RTC features + * @alarm_task: RTC wait alarm task. + * @rtc: RTC device.
*/ struct optee_rtc { struct device *dev; struct tee_context *ctx; u32 session_id; + u32 session2_id; struct tee_shm *shm; u64 features; + struct task_struct *alarm_task; + struct rtc_device *rtc; }; static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm) @@ -60,7 +157,7 @@ static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm) struct tee_param param[4] = {0}; int ret; - inv_arg.func = TA_CMD_RTC_GET_TIME; + inv_arg.func = PTA_CMD_RTC_GET_TIME; inv_arg.session = priv->session_id; inv_arg.num_params = 4; @@ -97,19 +194,10 @@ static int optee_rtc_settime(struct device *dev, struct rtc_time *tm) struct optee_rtc *priv = dev_get_drvdata(dev); struct tee_ioctl_invoke_arg inv_arg = {0}; struct tee_param param[4] = {0}; - struct optee_rtc_time optee_tm; - void *rtc_data; + struct optee_rtc_time *optee_tm; int ret; - optee_tm.tm_sec = tm->tm_sec; - optee_tm.tm_min = tm->tm_min; - optee_tm.tm_hour = tm->tm_hour; - optee_tm.tm_mday = tm->tm_mday; - optee_tm.tm_mon = tm->tm_mon; - optee_tm.tm_year = tm->tm_year + 1900; - optee_tm.tm_wday = tm->tm_wday; - - inv_arg.func = TA_CMD_RTC_SET_TIME; + inv_arg.func = PTA_CMD_RTC_SET_TIME; inv_arg.session = priv->session_id; inv_arg.num_params = 4; @@ -117,11 +205,17 @@ static int optee_rtc_settime(struct device *dev, struct rtc_time *tm) param[0].u.memref.shm = priv->shm; param[0].u.memref.size = sizeof(struct optee_rtc_time); - rtc_data = tee_shm_get_va(priv->shm, 0); - if (IS_ERR(rtc_data)) - return PTR_ERR(rtc_data); + optee_tm = tee_shm_get_va(priv->shm, 0); + if (IS_ERR(optee_tm)) + return PTR_ERR(optee_tm); - memcpy(rtc_data, &optee_tm, sizeof(struct optee_rtc_time)); + optee_tm->tm_min = tm->tm_min; + optee_tm->tm_sec = tm->tm_sec; + optee_tm->tm_hour = tm->tm_hour; + optee_tm->tm_mday = tm->tm_mday; + optee_tm->tm_mon = tm->tm_mon; + optee_tm->tm_year = tm->tm_year + 1900; + optee_tm->tm_wday = tm->tm_wday; ret = tee_client_invoke_func(priv->ctx, &inv_arg, param); if (ret < 0 || inv_arg.ret != 0) @@ -140,7 +234,7 @@ static int optee_rtc_readoffset(struct device *dev, long *offset) if (!(priv->features & TA_RTC_FEATURE_CORRECTION)) return -EOPNOTSUPP; - inv_arg.func = TA_CMD_RTC_GET_OFFSET; + inv_arg.func = PTA_CMD_RTC_GET_OFFSET; inv_arg.session = priv->session_id; inv_arg.num_params = 4; @@ -165,7 +259,7 @@ static int optee_rtc_setoffset(struct device *dev, long offset) if (!(priv->features & TA_RTC_FEATURE_CORRECTION)) return -EOPNOTSUPP; - inv_arg.func = TA_CMD_RTC_SET_OFFSET; + inv_arg.func = PTA_CMD_RTC_SET_OFFSET; inv_arg.session = priv->session_id; inv_arg.num_params = 4; @@ -179,13 +273,228 @@ static int optee_rtc_setoffset(struct device *dev, long offset) return 0; } +static int optee_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct optee_rtc *priv = dev_get_drvdata(dev); + struct tee_ioctl_invoke_arg inv_arg = {0}; + struct optee_rtc_alarm *optee_alarm; + struct tee_param param[1] = {0}; + int ret; + + if (!(priv->features & TA_RTC_FEATURE_ALARM)) + return -EOPNOTSUPP; + + inv_arg.func = PTA_CMD_RTC_READ_ALARM; + inv_arg.session = priv->session_id; + inv_arg.num_params = 1; + + /* Fill invoke cmd params */ + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; + param[0].u.memref.shm = priv->shm; + param[0].u.memref.size = sizeof(struct optee_rtc_alarm); + + ret = tee_client_invoke_func(priv->ctx, &inv_arg, param); + if (ret < 0 || inv_arg.ret != 0) + return ret ? 
ret : -EPROTO; + + optee_alarm = tee_shm_get_va(priv->shm, 0); + if (IS_ERR(optee_alarm)) + return PTR_ERR(optee_alarm); + + if (param[0].u.memref.size != sizeof(*optee_alarm)) + return -EPROTO; + + alarm->enabled = optee_alarm->enabled; + alarm->pending = optee_alarm->pending; + alarm->time.tm_sec = optee_alarm->time.tm_sec; + alarm->time.tm_min = optee_alarm->time.tm_min; + alarm->time.tm_hour = optee_alarm->time.tm_hour; + alarm->time.tm_mday = optee_alarm->time.tm_mday; + alarm->time.tm_mon = optee_alarm->time.tm_mon; + alarm->time.tm_year = optee_alarm->time.tm_year - 1900; + alarm->time.tm_wday = optee_alarm->time.tm_wday; + alarm->time.tm_yday = rtc_year_days(alarm->time.tm_mday, + alarm->time.tm_mon, + alarm->time.tm_year); + + return 0; +} + +static int optee_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct optee_rtc *priv = dev_get_drvdata(dev); + struct tee_ioctl_invoke_arg inv_arg = {0}; + struct optee_rtc_alarm *optee_alarm; + struct tee_param param[1] = {0}; + int ret; + + if (!(priv->features & TA_RTC_FEATURE_ALARM)) + return -EOPNOTSUPP; + + inv_arg.func = PTA_CMD_RTC_SET_ALARM; + inv_arg.session = priv->session_id; + inv_arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + param[0].u.memref.shm = priv->shm; + param[0].u.memref.size = sizeof(struct optee_rtc_alarm); + + optee_alarm = tee_shm_get_va(priv->shm, 0); + if (IS_ERR(optee_alarm)) + return PTR_ERR(optee_alarm); + + optee_alarm->enabled = alarm->enabled; + optee_alarm->pending = alarm->pending; + optee_alarm->time.tm_sec = alarm->time.tm_sec; + optee_alarm->time.tm_min = alarm->time.tm_min; + optee_alarm->time.tm_hour = alarm->time.tm_hour; + optee_alarm->time.tm_mday = alarm->time.tm_mday; + optee_alarm->time.tm_mon = alarm->time.tm_mon; + optee_alarm->time.tm_year = alarm->time.tm_year + 1900; + optee_alarm->time.tm_wday = alarm->time.tm_wday; + + ret = tee_client_invoke_func(priv->ctx, &inv_arg, param); + if (ret < 0 || inv_arg.ret != 0) + return ret ? ret : -EPROTO; + + return 0; +} + +static int optee_rtc_enable_alarm(struct device *dev, unsigned int enabled) +{ + struct optee_rtc *priv = dev_get_drvdata(dev); + struct tee_ioctl_invoke_arg inv_arg = {0}; + struct tee_param param[1] = {0}; + int ret; + + if (!(priv->features & TA_RTC_FEATURE_ALARM)) + return -EOPNOTSUPP; + + inv_arg.func = PTA_CMD_RTC_ENABLE_ALARM; + inv_arg.session = priv->session_id; + inv_arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = (bool)enabled; + + ret = tee_client_invoke_func(priv->ctx, &inv_arg, param); + if (ret < 0 || inv_arg.ret != 0) + return ret ? 
ret : -EPROTO; + + return 0; +} + static const struct rtc_class_ops optee_rtc_ops = { - .read_time = optee_rtc_readtime, - .set_time = optee_rtc_settime, - .set_offset = optee_rtc_setoffset, - .read_offset = optee_rtc_readoffset, + .read_time = optee_rtc_readtime, + .set_time = optee_rtc_settime, + .set_offset = optee_rtc_setoffset, + .read_offset = optee_rtc_readoffset, + .read_alarm = optee_rtc_read_alarm, + .set_alarm = optee_rtc_set_alarm, + .alarm_irq_enable = optee_rtc_enable_alarm, }; +static int optee_rtc_wait_alarm(struct device *dev, int *return_status) +{ + struct optee_rtc *priv = dev_get_drvdata(dev); + struct tee_ioctl_invoke_arg inv_arg = {0}; + struct tee_param param[1] = {0}; + int ret; + + if (!(priv->features & TA_RTC_FEATURE_ALARM)) + return -EOPNOTSUPP; + + inv_arg.func = PTA_CMD_RTC_WAIT_ALARM; + inv_arg.session = priv->session2_id; + inv_arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; + + ret = tee_client_invoke_func(priv->ctx, &inv_arg, param); + if (ret < 0 || inv_arg.ret != 0) + return ret ? ret : -EPROTO; + + *return_status = param[0].u.value.a; + + return 0; +} + +static int optee_rtc_cancel_wait_alarm(struct device *dev) +{ + struct optee_rtc *priv = dev_get_drvdata(dev); + struct tee_ioctl_invoke_arg inv_arg = {0}; + struct tee_param param[1] = {0}; + int ret; + + if (!(priv->features & TA_RTC_FEATURE_ALARM)) + return -EOPNOTSUPP; + + inv_arg.func = PTA_CMD_RTC_CANCEL_WAIT; + inv_arg.session = priv->session_id; + inv_arg.num_params = 0; + + ret = tee_client_invoke_func(priv->ctx, &inv_arg, param); + if (ret < 0 || inv_arg.ret != 0) + return ret ? ret : -EPROTO; + + return 0; +} + +static int optee_rtc_set_alarm_wake_status(struct device *dev, bool status) +{ + struct optee_rtc *priv = dev_get_drvdata(dev); + struct tee_ioctl_invoke_arg inv_arg = {0}; + struct tee_param param[1] = {0}; + int ret; + + if (!(priv->features & TA_RTC_FEATURE_ALARM)) + return -EOPNOTSUPP; + + inv_arg.func = PTA_CMD_RTC_SET_WAKE_ALARM_STATUS; + inv_arg.session = priv->session_id; + inv_arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = status; + + ret = tee_client_invoke_func(priv->ctx, &inv_arg, param); + + if (ret < 0 || inv_arg.ret != 0) + return ret ? 
ret : -EPROTO; + + return 0; +} + +static int optee_rtc_handle_alarm_event(void *data) +{ + struct optee_rtc *priv = (struct optee_rtc *)data; + int wait_alarm_return_status = 0; + int ret; + + while (!kthread_should_stop()) { + ret = optee_rtc_wait_alarm(priv->dev, &wait_alarm_return_status); + if (ret) { + dev_err(priv->dev, "Failed to wait for alarm: %d\n", ret); + return ret; + } + switch (wait_alarm_return_status) { + case WAIT_ALARM_ALARM_OCCURRED: + dev_dbg(priv->dev, "Alarm occurred\n"); + rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF); + break; + case WAIT_ALARM_CANCELED: + dev_dbg(priv->dev, "Alarm canceled\n"); + break; + default: + dev_warn(priv->dev, "Unknown return status: %d\n", + wait_alarm_return_status); + break; + } + } + + return 0; +} + static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc, u64 *features) { @@ -196,7 +505,7 @@ static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc, struct optee_rtc_time *tm; int ret; - inv_arg.func = TA_CMD_RTC_GET_INFO; + inv_arg.func = PTA_CMD_RTC_GET_INFO; inv_arg.session = priv->session_id; inv_arg.num_params = 4; @@ -241,14 +550,13 @@ static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) static int optee_rtc_probe(struct device *dev) { struct tee_client_device *rtc_device = to_tee_client_device(dev); - struct tee_ioctl_open_session_arg sess_arg; + struct tee_ioctl_open_session_arg sess2_arg = {0}; + struct tee_ioctl_open_session_arg sess_arg = {0}; struct optee_rtc *priv; struct rtc_device *rtc; struct tee_shm *shm; int ret, err; - memset(&sess_arg, 0, sizeof(sess_arg)); - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -257,12 +565,14 @@ static int optee_rtc_probe(struct device *dev) if (IS_ERR(rtc)) return PTR_ERR(rtc); + priv->rtc = rtc; + /* Open context with TEE driver */ priv->ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL); if (IS_ERR(priv->ctx)) return -ENODEV; - /* Open session with rtc Trusted App */ + /* Open first session with rtc Pseudo Trusted App */ export_uuid(sess_arg.uuid, &rtc_device->id.uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; @@ -274,6 +584,11 @@ static int optee_rtc_probe(struct device *dev) } priv->session_id = sess_arg.session; + /* + * Shared memory is used for passing an instance of either struct optee_rtc_info, + * struct optee_rtc_time or struct optee_rtc_alarm to OP-TEE service. + * The former is by definition large enough to cover all parameter cases. + */ shm = tee_shm_alloc_kernel_buf(priv->ctx, sizeof(struct optee_rtc_info)); if (IS_ERR(shm)) { dev_err(priv->dev, "tee_shm_alloc_kernel_buf failed\n"); @@ -293,19 +608,70 @@ static int optee_rtc_probe(struct device *dev) goto out_shm; } + /* Handle feature-related setup before registering to the rtc framework */ + if (priv->features & TA_RTC_FEATURE_ALARM) { + priv->alarm_task = kthread_create(optee_rtc_handle_alarm_event, + priv, "rtc_alarm_evt"); + if (IS_ERR(priv->alarm_task)) { + dev_err(dev, "Failed to create alarm thread\n"); + err = PTR_ERR(priv->alarm_task); + goto out_shm; + } + + /* + * In case the alarm feature is supported on the optee side, we create a kthread + * that will, in a new optee session, call a PTA interface "rtc_wait_alarm". + * This call returns when an alarm occurs or when the wait is canceled. + * The new optee session is therefore only needed in this case as we cannot + * use the same session for parallel calls to optee PTA.
+ * Hence one session is reserved to wait for alarms and the other to make + * standard calls to RTC PTA. + */ + + /* Open second session with rtc Pseudo Trusted App */ + export_uuid(sess2_arg.uuid, &rtc_device->id.uuid); + sess2_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; + + ret = tee_client_open_session(priv->ctx, &sess2_arg, NULL); + if (ret < 0 || sess2_arg.ret != 0) { + dev_err(dev, "tee_client_open_session failed, err: %x\n", sess2_arg.ret); + err = -EINVAL; + goto out_thrd; + } + priv->session2_id = sess2_arg.session; + + if (priv->features & TA_RTC_FEATURE_WAKEUP_ALARM) + device_init_wakeup(dev, true); + } + err = devm_rtc_register_device(rtc); if (err) - goto out_shm; + goto out_wk; /* - * We must clear this bit after registering because rtc_register_device - * will set it if it sees that .set_offset is provided. + * We must clear those bits after registering because registering an rtc_device + * will set them if it sees that .set_offset and .set_alarm are provided. + */ if (!(priv->features & TA_RTC_FEATURE_CORRECTION)) clear_bit(RTC_FEATURE_CORRECTION, rtc->features); + if (!(priv->features & TA_RTC_FEATURE_ALARM)) + clear_bit(RTC_FEATURE_ALARM, rtc->features); - return 0; + /* Start the thread after the rtc is set up */ + if (priv->alarm_task) { + wake_up_process(priv->alarm_task); + dev_dbg(dev, "Wait alarm thread successfully started\n"); + } + return 0; +out_wk: + if (priv->features & TA_RTC_FEATURE_ALARM) { + device_init_wakeup(dev, false); + tee_client_close_session(priv->ctx, priv->session2_id); + } +out_thrd: + if (priv->features & TA_RTC_FEATURE_ALARM) + kthread_stop(priv->alarm_task); out_shm: tee_shm_free(priv->shm); out_sess: @@ -320,12 +686,34 @@ static int optee_rtc_remove(struct device *dev) { struct optee_rtc *priv = dev_get_drvdata(dev); + if (priv->features & TA_RTC_FEATURE_ALARM) { + optee_rtc_cancel_wait_alarm(dev); + kthread_stop(priv->alarm_task); + device_init_wakeup(dev, false); + tee_client_close_session(priv->ctx, priv->session2_id); + } + + tee_shm_free(priv->shm); tee_client_close_session(priv->ctx, priv->session_id); tee_client_close_context(priv->ctx); return 0; } +static int optee_rtc_suspend(struct device *dev) +{ + int res = optee_rtc_set_alarm_wake_status(dev, device_may_wakeup(dev)); + + if (res) { + dev_err(dev, "Unable to transmit wakeup information to optee rtc\n"); + return res; + } + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(optee_rtc_pm_ops, optee_rtc_suspend, NULL); + static const struct tee_client_device_id optee_rtc_id_table[] = { {UUID_INIT(0xf389f8c8, 0x845f, 0x496c, 0x8b, 0xbe, 0xd6, 0x4b, 0xd2, 0x4c, 0x92, 0xfd)}, @@ -341,6 +729,7 @@ static struct tee_client_driver optee_rtc_driver = { .bus = &tee_bus_type, .probe = optee_rtc_probe, .remove = optee_rtc_remove, + .pm = pm_sleep_ptr(&optee_rtc_pm_ops), }, }; diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 2e1ac0c42e93..bb4fe81d3d62 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -42,6 +42,7 @@ #define PCF2127_BIT_CTRL2_AF BIT(4) #define PCF2127_BIT_CTRL2_TSF2 BIT(5) #define PCF2127_BIT_CTRL2_WDTF BIT(6) +#define PCF2127_BIT_CTRL2_MSF BIT(7) /* Control register 3 */ #define PCF2127_REG_CTRL3 0x02 #define PCF2127_BIT_CTRL3_BLIE BIT(0) @@ -96,7 +97,8 @@ #define PCF2127_CTRL2_IRQ_MASK ( \ PCF2127_BIT_CTRL2_AF | \ PCF2127_BIT_CTRL2_WDTF | \ - PCF2127_BIT_CTRL2_TSF2) + PCF2127_BIT_CTRL2_TSF2 | \ + PCF2127_BIT_CTRL2_MSF) #define PCF2127_MAX_TS_SUPPORTED 4 @@ -606,6 +608,21 @@ static int pcf2127_watchdog_init(struct device *dev, struct
pcf2127 *pcf2127) set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status); } + /* + * When using interrupt pin (INT A) as watchdog output, only allow + * watchdog interrupt (PCF2131_BIT_INT_WD_CD) and disable (mask) all + * other interrupts. + */ + if (pcf2127->cfg->type == PCF2131) { + ret = regmap_write(pcf2127->regmap, + PCF2131_REG_INT_A_MASK1, + PCF2131_BIT_INT_BLIE | + PCF2131_BIT_INT_BIE | + PCF2131_BIT_INT_AIE | + PCF2131_BIT_INT_SI | + PCF2131_BIT_INT_MI); + } + return devm_watchdog_register_device(dev, &pcf2127->wdd); } diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 79b2a16f15ad..291c0ccb0acd 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -331,7 +331,7 @@ static const struct rtc_class_ops s3c_rtcops = { .alarm_irq_enable = s3c_rtc_setaie, }; -static void s3c24xx_rtc_enable(struct s3c_rtc *info) +static void s3c6410_rtc_enable(struct s3c_rtc *info) { unsigned int con, tmp; @@ -361,19 +361,6 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info) } } -static void s3c24xx_rtc_disable(struct s3c_rtc *info) -{ - unsigned int con; - - con = readw(info->base + S3C2410_RTCCON); - con &= ~S3C2410_RTCCON_RTCEN; - writew(con, info->base + S3C2410_RTCCON); - - con = readb(info->base + S3C2410_TICNT); - con &= ~S3C2410_TICNT_ENABLE; - writeb(con, info->base + S3C2410_TICNT); -} - static void s3c6410_rtc_disable(struct s3c_rtc *info) { unsigned int con; @@ -538,53 +525,21 @@ static int s3c_rtc_resume(struct device *dev) #endif static SIMPLE_DEV_PM_OPS(s3c_rtc_pm_ops, s3c_rtc_suspend, s3c_rtc_resume); -static void s3c24xx_rtc_irq(struct s3c_rtc *info, int mask) -{ - rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF); -} - static void s3c6410_rtc_irq(struct s3c_rtc *info, int mask) { rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF); writeb(mask, info->base + S3C2410_INTP); } -static const struct s3c_rtc_data s3c2410_rtc_data = { - .irq_handler = s3c24xx_rtc_irq, - .enable = s3c24xx_rtc_enable, - .disable = s3c24xx_rtc_disable, -}; - -static const struct s3c_rtc_data s3c2416_rtc_data = { - .irq_handler = s3c24xx_rtc_irq, - .enable = s3c24xx_rtc_enable, - .disable = s3c24xx_rtc_disable, -}; - -static const struct s3c_rtc_data s3c2443_rtc_data = { - .irq_handler = s3c24xx_rtc_irq, - .enable = s3c24xx_rtc_enable, - .disable = s3c24xx_rtc_disable, -}; - static const struct s3c_rtc_data s3c6410_rtc_data = { .needs_src_clk = true, .irq_handler = s3c6410_rtc_irq, - .enable = s3c24xx_rtc_enable, + .enable = s3c6410_rtc_enable, .disable = s3c6410_rtc_disable, }; static const __maybe_unused struct of_device_id s3c_rtc_dt_match[] = { { - .compatible = "samsung,s3c2410-rtc", - .data = &s3c2410_rtc_data, - }, { - .compatible = "samsung,s3c2416-rtc", - .data = &s3c2416_rtc_data, - }, { - .compatible = "samsung,s3c2443-rtc", - .data = &s3c2443_rtc_data, - }, { .compatible = "samsung,s3c6410-rtc", .data = &s3c6410_rtc_data, }, { diff --git a/drivers/rtc/rtc-s3c.h b/drivers/rtc/rtc-s3c.h index 3552914aa611..11d7a1255ce4 100644 --- a/drivers/rtc/rtc-s3c.h +++ b/drivers/rtc/rtc-s3c.h @@ -21,25 +21,6 @@ #define S3C2443_RTCCON_TICSEL (1 << 4) #define S3C64XX_RTCCON_TICEN (1 << 8) -#define S3C2410_TICNT S3C2410_RTCREG(0x44) -#define S3C2410_TICNT_ENABLE (1 << 7) - -/* S3C2443: tick count is 15 bit wide - * TICNT[6:0] contains upper 7 bits - * TICNT1[7:0] contains lower 8 bits - */ -#define S3C2443_TICNT_PART(x) ((x & 0x7f00) >> 8) -#define S3C2443_TICNT1 S3C2410_RTCREG(0x4C) -#define S3C2443_TICNT1_PART(x) (x & 0xff) - -/* S3C2416: tick count is 32 bit wide - * TICNT[6:0] contains bits [14:8] - 
* TICNT1[7:0] contains lower 8 bits - * TICNT2[16:0] contains upper 17 bits - */ -#define S3C2416_TICNT2 S3C2410_RTCREG(0x48) -#define S3C2416_TICNT2_PART(x) ((x & 0xffff8000) >> 15) - #define S3C2410_RTCALM S3C2410_RTCREG(0x50) #define S3C2410_RTCALM_ALMEN (1 << 6) #define S3C2410_RTCALM_YEAREN (1 << 5) diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c index 00c3033e8079..708ea5d964de 100644 --- a/drivers/rtc/rtc-sd2405al.c +++ b/drivers/rtc/rtc-sd2405al.c @@ -5,7 +5,9 @@ * Datasheet: * https://image.dfrobot.com/image/data/TOY0021/SD2405AL%20datasheet%20(Angelo%20v0.1).pdf * - * Copyright (C) 2024 Tóth János <gomba007@gmail.com> + * I2C slave address: 0x32 + * + * Copyright (C) 2024-2025 Tóth János <gomba007@gmail.com> */ #include <linux/bcd.h> diff --git a/drivers/rtc/rtc-spacemit-p1.c b/drivers/rtc/rtc-spacemit-p1.c new file mode 100644 index 000000000000..43ab62494bb4 --- /dev/null +++ b/drivers/rtc/rtc-spacemit-p1.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the RTC found in the SpacemiT P1 PMIC + * + * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved. + */ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/rtc.h> + +#define MOD_NAME "spacemit-p1-rtc" + +/* + * Six consecutive 1-byte registers hold the seconds, minutes, hours, + * day-of-month, month, and year (respectively). + * + * The range of values in these registers is: + * seconds 0-59 + * minutes 0-59 + * hours 0-23 + * day 0-30 (struct tm is 1-31) + * month 0-11 + * year years since 2000 (struct tm is since 1900) + * + * Note that the day and year must be converted after reading and + * before writing. + */ +#define RTC_TIME 0x0d /* Offset of the seconds register */ + +#define RTC_CTRL 0x1d +#define RTC_EN BIT(2) + +/* Number of attempts to read a consistent time stamp before giving up */ +#define RTC_READ_TRIES 20 /* At least 1 */ + +struct p1_rtc { + struct regmap *regmap; + struct rtc_device *rtc; +}; + +/* + * The P1 hardware documentation states that the register values are + * latched to ensure a consistent time snapshot within the registers, + * but these are in fact unstable due to a bug in the hardware design. + * So we loop until we get two identical readings. + */ +static int p1_rtc_read_time(struct device *dev, struct rtc_time *t) +{ + struct p1_rtc *p1 = dev_get_drvdata(dev); + struct regmap *regmap = p1->regmap; + u32 count = RTC_READ_TRIES; + u8 seconds; + u8 time[6]; + int ret; + + if (!regmap_test_bits(regmap, RTC_CTRL, RTC_EN)) + return -EINVAL; /* RTC is disabled */ + + ret = regmap_bulk_read(regmap, RTC_TIME, time, sizeof(time)); + if (ret) + return ret; + + do { + seconds = time[0]; + ret = regmap_bulk_read(regmap, RTC_TIME, time, sizeof(time)); + if (ret) + return ret; + } while (time[0] != seconds && --count); + + if (!count) + return -EIO; /* Unable to get a consistent result */ + + t->tm_sec = time[0] & GENMASK(5, 0); + t->tm_min = time[1] & GENMASK(5, 0); + t->tm_hour = time[2] & GENMASK(4, 0); + t->tm_mday = (time[3] & GENMASK(4, 0)) + 1; + t->tm_mon = time[4] & GENMASK(3, 0); + t->tm_year = (time[5] & GENMASK(5, 0)) + 100; + + return 0; +} + +/* + * The P1 hardware documentation states that values in the registers are + * latched so when written they represent a consistent time snapshot. + * Nevertheless, this is not guaranteed by the implementation, so we must + * disable the RTC while updating it.
+ */ +static int p1_rtc_set_time(struct device *dev, struct rtc_time *t) +{ + struct p1_rtc *p1 = dev_get_drvdata(dev); + struct regmap *regmap = p1->regmap; + u8 time[6]; + int ret; + + time[0] = t->tm_sec; + time[1] = t->tm_min; + time[2] = t->tm_hour; + time[3] = t->tm_mday - 1; + time[4] = t->tm_mon; + time[5] = t->tm_year - 100; + + /* Disable the RTC to update; re-enable again when done */ + ret = regmap_clear_bits(regmap, RTC_CTRL, RTC_EN); + if (ret) + return ret; + + /* If something goes wrong, leave the RTC disabled */ + ret = regmap_bulk_write(regmap, RTC_TIME, time, sizeof(time)); + if (ret) + return ret; + + return regmap_set_bits(regmap, RTC_CTRL, RTC_EN); +} + +static const struct rtc_class_ops p1_rtc_class_ops = { + .read_time = p1_rtc_read_time, + .set_time = p1_rtc_set_time, +}; + +static int p1_rtc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rtc_device *rtc; + struct p1_rtc *p1; + + p1 = devm_kzalloc(dev, sizeof(*p1), GFP_KERNEL); + if (!p1) + return -ENOMEM; + dev_set_drvdata(dev, p1); + + p1->regmap = dev_get_regmap(dev->parent, NULL); + if (!p1->regmap) + return dev_err_probe(dev, -ENODEV, "failed to get regmap\n"); + + rtc = devm_rtc_allocate_device(dev); + if (IS_ERR(rtc)) + return dev_err_probe(dev, PTR_ERR(rtc), + "error allocating device\n"); + p1->rtc = rtc; + + rtc->ops = &p1_rtc_class_ops; + rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + rtc->range_max = RTC_TIMESTAMP_END_2063; + + clear_bit(RTC_FEATURE_ALARM, rtc->features); + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features); + + return devm_rtc_register_device(rtc); +} + +static struct platform_driver p1_rtc_driver = { + .probe = p1_rtc_probe, + .driver = { + .name = MOD_NAME, + }, +}; + +module_platform_driver(p1_rtc_driver); + +MODULE_DESCRIPTION("SpacemiT P1 RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" MOD_NAME); diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c index 54c8429b16bf..76ecf7b798f0 100644 --- a/drivers/rtc/rtc-tps6586x.c +++ b/drivers/rtc/rtc-tps6586x.c @@ -258,6 +258,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN); + rtc->irq_en = true; ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, tps6586x_rtc_irq, IRQF_ONESHOT, diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index 4bcd7ca32f27..b8a0fccef14e 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c @@ -669,7 +669,7 @@ static const struct i2c_device_id x1205_id[] = { MODULE_DEVICE_TABLE(i2c, x1205_id); static const struct of_device_id x1205_dt_ids[] = { - { .compatible = "xircom,x1205", }, + { .compatible = "xicor,x1205", }, {}, }; MODULE_DEVICE_TABLE(of, x1205_dt_ids); diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c index f39102b66eac..3baa2b481d9f 100644 --- a/drivers/rtc/rtc-zynqmp.c +++ b/drivers/rtc/rtc-zynqmp.c @@ -277,6 +277,10 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id) static int xlnx_rtc_probe(struct platform_device *pdev) { struct xlnx_rtc_dev *xrtcdev; + bool is_alarm_set = false; + u32 pending_alrm_irq; + u32 current_time; + u32 alarm_time; int ret; xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL); @@ -296,6 +300,17 @@ static int xlnx_rtc_probe(struct platform_device *pdev) if (IS_ERR(xrtcdev->reg_base)) return PTR_ERR(xrtcdev->reg_base); + /* Clear any pending alarm interrupts from previous kernel/boot */ + pending_alrm_irq = readl(xrtcdev->reg_base + RTC_INT_STS) & RTC_INT_ALRM; + if 
(pending_alrm_irq) + writel(pending_alrm_irq, xrtcdev->reg_base + RTC_INT_STS); + + /* Check if a valid alarm is already set from previous kernel/boot */ + alarm_time = readl(xrtcdev->reg_base + RTC_ALRM); + current_time = readl(xrtcdev->reg_base + RTC_CUR_TM); + if (alarm_time > current_time && alarm_time != 0) + is_alarm_set = true; + xrtcdev->alarm_irq = platform_get_irq_byname(pdev, "alarm"); if (xrtcdev->alarm_irq < 0) return xrtcdev->alarm_irq; @@ -337,6 +352,10 @@ static int xlnx_rtc_probe(struct platform_device *pdev) xlnx_init_rtc(xrtcdev); + /* Re-enable alarm interrupt if a valid alarm was found */ + if (is_alarm_set) + writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN); + device_init_wakeup(&pdev->dev, true); return devm_rtc_register_device(xrtcdev->rtc); diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 2c72da6b8cf0..7f1ad305eee6 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -124,7 +124,7 @@ static void mvs_free(struct mvs_info *mvi) if (mvi->shost) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) - cancel_delayed_work(&mwq->work_q); + cancel_delayed_work_sync(&mwq->work_q); kfree(mvi->rsvd_tags); kfree(mvi); } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 604e66bead1e..cb95b7b12051 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4890,7 +4890,9 @@ struct purex_item { struct purex_item *pkt); atomic_t in_use; uint16_t size; - uint8_t iocb[] __counted_by(size); + struct { + uint8_t iocb[64]; + } iocb; }; #include "qla_edif.h" @@ -5099,6 +5101,7 @@ typedef struct scsi_qla_host { struct list_head head; spinlock_t lock; } purex_list; + struct purex_item default_item; struct name_list_extended gnl; /* Count of active session/fcport */ @@ -5127,11 +5130,6 @@ typedef struct scsi_qla_host { #define DPORT_DIAG_IN_PROGRESS BIT_0 #define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1 uint16_t dport_status; - - /* Must be last --ends in a flexible-array member. 
*/ - TRAILING_OVERLAP(struct purex_item, default_item, iocb, - uint8_t __default_item_iocb[QLA_DEFAULT_PAYLOAD_SIZE]; - ); } scsi_qla_host_t; struct qla27xx_image_status { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 4559b490614d..c4c6b5c6658c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1077,17 +1077,17 @@ static struct purex_item * qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) { struct purex_item *item = NULL; + uint8_t item_hdr_size = sizeof(*item); if (size > QLA_DEFAULT_PAYLOAD_SIZE) { - item = kzalloc(struct_size(item, iocb, size), GFP_ATOMIC); + item = kzalloc(item_hdr_size + + (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); } else { if (atomic_inc_return(&vha->default_item.in_use) == 1) { item = &vha->default_item; goto initialize_purex_header; } else { - item = kzalloc( - struct_size(item, iocb, QLA_DEFAULT_PAYLOAD_SIZE), - GFP_ATOMIC); + item = kzalloc(item_hdr_size, GFP_ATOMIC); } } if (!item) { @@ -1127,16 +1127,17 @@ qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, * @vha: SCSI driver HA context * @pkt: ELS packet */ -static struct purex_item * -qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) +static struct purex_item +*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) { struct purex_item *item; - item = qla24xx_alloc_purex_item(vha, QLA_DEFAULT_PAYLOAD_SIZE); + item = qla24xx_alloc_purex_item(vha, + QLA_DEFAULT_PAYLOAD_SIZE); if (!item) return item; - memcpy(&item->iocb, pkt, QLA_DEFAULT_PAYLOAD_SIZE); + memcpy(&item->iocb, pkt, sizeof(item->iocb)); return item; } diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 065f9bcca26f..316594aa40cc 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -1308,7 +1308,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp) ql_dbg(ql_dbg_unsol, vha, 0x2121, "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n", - item->iocb[3], item->size, uctx->exchange_address, + item->iocb.iocb[3], item->size, uctx->exchange_address, fcport->d_id.b24); /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F * ----- ----------------------------------------------- diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index cb56d2af6cfa..5ffd94586652 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -6459,10 +6459,9 @@ dealloc: void qla24xx_free_purex_item(struct purex_item *item) { - if (item == &item->vha->default_item) { + if (item == &item->vha->default_item) memset(&item->vha->default_item, 0, sizeof(struct purex_item)); - memset(&item->vha->__default_item_iocb, 0, QLA_DEFAULT_PAYLOAD_SIZE); - } else + else kfree(item); } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 0904ecae253a..b19acd662726 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2774,7 +2774,7 @@ static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) config_item_name(&dev->dev_group.cg_item)); cur_len++; /* Extra byte for NULL terminator */ - if ((cur_len + len) > PAGE_SIZE) { + if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) { pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 0086816b27cd..c040afc6668e 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -1949,7 +1949,7 @@ static 
umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj, return hba->dev_info.hid_sup ? attr->mode : 0; } -static const struct attribute_group ufs_sysfs_hid_group = { +const struct attribute_group ufs_sysfs_hid_group = { .name = "hid", .attrs = ufs_sysfs_hid, .is_visible = ufs_sysfs_hid_is_visible, diff --git a/drivers/ufs/core/ufs-sysfs.h b/drivers/ufs/core/ufs-sysfs.h index 8d94af3b8077..6efb82a082fd 100644 --- a/drivers/ufs/core/ufs-sysfs.h +++ b/drivers/ufs/core/ufs-sysfs.h @@ -14,5 +14,6 @@ void ufs_sysfs_remove_nodes(struct device *dev); extern const struct attribute_group ufs_sysfs_unit_descriptor_group; extern const struct attribute_group ufs_sysfs_lun_attributes_group; +extern const struct attribute_group ufs_sysfs_hid_group; #endif diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index d9632d7c5f01..8339fec975b9 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -6684,6 +6684,14 @@ static void ufshcd_err_handler(struct work_struct *work) } spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_rpm_get_noresume(hba); + if (hba->pm_op_in_progress) { + ufshcd_link_recovery(hba); + ufshcd_rpm_put(hba); + return; + } + ufshcd_rpm_put(hba); + ufshcd_err_handling_prepare(hba); spin_lock_irqsave(hba->host->host_lock, flags); @@ -8489,6 +8497,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba) DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) & UFS_DEV_HID_SUPPORT; + sysfs_update_group(&hba->dev->kobj, &ufs_sysfs_hid_group); + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; err = ufshcd_read_string_desc(hba, model_index, @@ -10677,6 +10687,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ spin_lock_init(&hba->clk_gating.lock); + /* Initialize mutex for PM QoS request synchronization */ + mutex_init(&hba->pm_qos_mutex); + /* * Set the default power management level for runtime and system PM. * Host controller drivers can override them in their @@ -10765,9 +10778,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) mutex_init(&hba->wb_mutex); - /* Initialize mutex for PM QoS request synchronization */ - mutex_init(&hba->pm_qos_mutex); - init_rwsem(&hba->clk_scaling_lock); ufshcd_init_clk_gating(hba); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index c3b9954df804..a257b739188d 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -126,9 +126,9 @@ config FB_ACORN config FB_CLPS711X tristate "CLPS711X LCD support" depends on FB && (ARCH_CLPS711X || COMPILE_TEST) + depends on LCD_CLASS_DEVICE select FB_IOMEM_HELPERS select FB_MODE_HELPERS - select LCD_CLASS_DEVICE select VIDEOMODE_HELPERS help Say Y to enable the Framebuffer driver for the Cirrus Logic @@ -150,7 +150,7 @@ config FB_IMX tristate "Freescale i.MX1/21/25/27 LCD support" depends on FB && HAVE_CLK && HAS_IOMEM depends on ARCH_MXC || COMPILE_TEST - select LCD_CLASS_DEVICE + depends on LCD_CLASS_DEVICE select FB_IOMEM_HELPERS select FB_MODE_HELPERS select VIDEOMODE_HELPERS @@ -948,9 +948,6 @@ config FB_RADEON a framebuffer device. There are both PCI and AGP versions. You don't need to choose this to run the Radeon in plain VGA mode. 
- There is a product page at - https://products.amd.com/en-us/GraphicCardResult.aspx - config FB_RADEON_I2C bool "DDC/I2C for ATI Radeon support" depends on FB_RADEON @@ -1060,6 +1057,7 @@ config FB_S3 select FB_TILEBLITTING select FB_SVGALIB select VGASTATE + select FB_CFB_REV_PIXELS_IN_BYTE select FONT_8x16 if FRAMEBUFFER_CONSOLE help Driver for graphics boards with S3 Trio / S3 Virge chip. diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index f9475c14f733..a9ec7f488522 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -160,6 +160,11 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, image.height = vc->vc_font.height; image.depth = 1; + if (image.dy >= info->var.yres) + return; + + image.height = min(image.height, info->var.yres - image.dy); + if (attribute) { buf = kmalloc(cellsize, GFP_ATOMIC); if (!buf) @@ -173,6 +178,18 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, cnt = count; image.width = vc->vc_font.width * cnt; + + if (image.dx >= info->var.xres) + break; + + if (image.dx + image.width > info->var.xres) { + image.width = info->var.xres - image.dx; + cnt = image.width / vc->vc_font.width; + if (cnt == 0) + break; + image.width = cnt * vc->vc_font.width; + } + pitch = DIV_ROUND_UP(image.width, 8) + scan_align; pitch &= ~scan_align; size = pitch * image.height + buf_align; diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c index 4d1634c492ec..594b60424d1c 100644 --- a/drivers/video/fbdev/core/fb_cmdline.c +++ b/drivers/video/fbdev/core/fb_cmdline.c @@ -40,7 +40,7 @@ int fb_get_options(const char *name, char **option) bool enabled; if (name) - is_of = strncmp(name, "offb", 4); + is_of = !strncmp(name, "offb", 4); enabled = __video_get_options(name, &options, is_of); diff --git a/drivers/video/fbdev/core/fb_fillrect.h b/drivers/video/fbdev/core/fb_fillrect.h index 66042e534de7..f366670a53af 100644 --- a/drivers/video/fbdev/core/fb_fillrect.h +++ b/drivers/video/fbdev/core/fb_fillrect.h @@ -92,8 +92,7 @@ static unsigned long pixel_to_pat(int bpp, u32 color) pattern = pattern | pattern << bpp; break; default: - pattern = color; - break; + return color; } #ifndef __LITTLE_ENDIAN pattern <<= (BITS_PER_LONG % bpp); diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 3b779c27c271..0a65bef01e3c 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -36,6 +36,7 @@ #include <video/of_videomode.h> #include <video/videomode.h> #include "../edid.h" +#include <linux/string_choices.h> /* * EDID parser @@ -320,9 +321,9 @@ static void get_dpms_capabilities(unsigned char flags, if (flags & DPMS_STANDBY) specs->dpms |= FB_DPMS_STANDBY; DPRINTK(" DPMS: Active %s, Suspend %s, Standby %s\n", - (flags & DPMS_ACTIVE_OFF) ? "yes" : "no", - (flags & DPMS_SUSPEND) ? "yes" : "no", - (flags & DPMS_STANDBY) ? 
"yes" : "no"); + str_yes_no(flags & DPMS_ACTIVE_OFF), + str_yes_no(flags & DPMS_SUSPEND), + str_yes_no(flags & DPMS_STANDBY)); } static void get_chroma(unsigned char *block, struct fb_monspecs *specs) diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index ade88e7bc760..676c6d3ccc12 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -674,7 +674,7 @@ static int of_platform_mb862xx_probe(struct platform_device *ofdev) struct fb_info *info; struct resource res; resource_size_t res_size; - unsigned long ret = -ENODEV; + int ret = -ENODEV; if (of_address_to_resource(np, 0, &res)) { dev_err(dev, "Invalid address\n"); diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index cfaf9454014d..72b85f475605 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -22,6 +22,7 @@ #include <linux/pci.h> #include <linux/console.h> #include <linux/backlight.h> +#include <linux/string_choices.h> #ifdef CONFIG_BOOTX_TEXT #include <asm/btext.h> #endif @@ -622,7 +623,7 @@ static int nvidiafb_set_par(struct fb_info *info) else par->FPDither = !!(NV_RD32(par->PRAMDAC, 0x083C) & 1); printk(KERN_INFO PFX "Flat panel dithering %s\n", - par->FPDither ? "enabled" : "disabled"); + str_enabled_disabled(par->FPDither)); } info->fix.visual = (info->var.bits_per_pixel == 8) ? diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index baf87f34cc24..b96a8a96bce8 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -60,6 +60,7 @@ #include <linux/soc/pxa/cpu.h> #include <video/of_display_timing.h> #include <video/videomode.h> +#include <linux/string_choices.h> #include <asm/io.h> #include <asm/irq.h> @@ -1419,7 +1420,7 @@ static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) if (ret < 0) pr_warn("Unable to %s LCD supply regulator: %d\n", - on ? "enable" : "disable", ret); + str_enable_disable(on), ret); else fbi->lcd_supply_enabled = on; } diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c index ff84106ecf1c..ba30e5568cab 100644 --- a/drivers/video/fbdev/s3fb.c +++ b/drivers/video/fbdev/s3fb.c @@ -50,10 +50,14 @@ struct s3fb_info { static const struct svga_fb_format s3fb_formats[] = { { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 16}, - { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, - FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16}, + { 1, {0, 1, 0}, {0, 1, 0}, {0, 1, 0}, {0, 0, 0}, 2, + FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 32, 64}, + { 2, {0, 2, 0}, {0, 2, 0}, {0, 2, 0}, {0, 0, 0}, 2, + FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 32}, { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 1, FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16}, + { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 2, + FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16}, { 8, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0, FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 4, 8}, {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0, @@ -557,7 +561,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) /* 32bpp mode is not supported on VIRGE VX, 24bpp is not supported on others */ - if ((par->chip == CHIP_988_VIRGE_VX) ? (rv == 7) : (rv == 6)) + if ((par->chip == CHIP_988_VIRGE_VX) ? 
(rv == 9) : (rv == 8)) rv = -EINVAL; if (rv < 0) { @@ -607,7 +611,7 @@ static int s3fb_set_par(struct fb_info *info) struct s3fb_info *par = info->par; u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes; u32 bpp = info->var.bits_per_pixel; - u32 htotal, hsstart; + u32 htotal, hsstart, pel_msk; if (bpp != 0) { info->fix.ypanstep = 1; @@ -617,9 +621,11 @@ static int s3fb_set_par(struct fb_info *info) info->tileops = NULL; /* in 4bpp supports 8p wide tiles only, any tiles otherwise */ - if (bpp == 4) { + if (bpp == 4 && (info->var.nonstd & 1) != 0) { + int i; bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH); - set_bit(8 - 1, info->pixmap.blit_x); + for (i = 8; i <= FB_MAX_BLIT_WIDTH; i += 8) + set_bit(i - 1, info->pixmap.blit_x); } else { bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH); } @@ -730,7 +736,7 @@ static int s3fb_set_par(struct fb_info *info) vga_wcrt(par->state.vgabase, 0x50, 0x00); vga_wcrt(par->state.vgabase, 0x67, 0x50); msleep(10); /* screen remains blank sometimes without this */ - vga_wcrt(par->state.vgabase, 0x63, (mode <= 2) ? 0x90 : 0x09); + vga_wcrt(par->state.vgabase, 0x63, (mode <= 4) ? 0x90 : 0x09); vga_wcrt(par->state.vgabase, 0x66, 0x90); } @@ -763,12 +769,17 @@ static int s3fb_set_par(struct fb_info *info) svga_wcrt_mask(par->state.vgabase, 0x31, 0x00, 0x40); multiplex = 0; hmul = 1; + pel_msk = 0xff; + + svga_wcrt_mask(par->state.vgabase, 0x08, 0x00, 0x60); + svga_wcrt_mask(par->state.vgabase, 0x05, 0x00, 0x60); /* Set mode-specific register values */ switch (mode) { case 0: fb_dbg(info, "text mode\n"); svga_set_textmode_vga_regs(par->state.vgabase); + pel_msk = 0x0f; /* Set additional registers like in 8-bit mode */ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30); @@ -783,8 +794,11 @@ static int s3fb_set_par(struct fb_info *info) } break; case 1: - fb_dbg(info, "4 bit pseudocolor\n"); - vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40); + fb_dbg(info, "1 bit pseudocolor\n"); + svga_wseq_mask(par->state.vgabase, 0x01, 0x10, 0x14); + svga_wcrt_mask(par->state.vgabase, 0x08, 0x60, 0x60); + svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60); + pel_msk = 0x01; /* Set additional registers like in 8-bit mode */ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30); @@ -794,7 +808,13 @@ static int s3fb_set_par(struct fb_info *info) svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30); break; case 2: - fb_dbg(info, "4 bit pseudocolor, planar\n"); + fb_dbg(info, "2 bit pseudocolor\n"); + svga_wseq_mask(par->state.vgabase, 0x01, 0x04, 0x14); + svga_wseq_mask(par->state.vgabase, 0x04, 0x08, 0x08); + vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x20); + svga_wcrt_mask(par->state.vgabase, 0x08, 0x20, 0x60); + svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60); + pel_msk = 0x03; /* Set additional registers like in 8-bit mode */ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30); @@ -804,8 +824,35 @@ static int s3fb_set_par(struct fb_info *info) svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30); break; case 3: + fb_dbg(info, "4 bit pseudocolor, planar\n"); + pel_msk = 0x0f; + + /* Set additional registers like in 8-bit mode */ + svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30); + svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0); + svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60); + + /* disable enhanced mode */ + svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30); + break; + case 4: + fb_dbg(info, "4 bit pseudocolor\n"); + vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40); + svga_wattr(par->state.vgabase, 0x33, 0x01); + 
svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60); + pel_msk = 0xf0; + + /* Set additional registers like in 8-bit mode */ + svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30); + svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0); + + /* disable enhanced mode */ + svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30); + break; + case 5: fb_dbg(info, "8 bit pseudocolor\n"); svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30); + svga_wcrt_mask(par->state.vgabase, 0x05, 0x20, 0x60); if (info->var.pixclock > 20000 || par->chip == CHIP_357_VIRGE_GX2 || par->chip == CHIP_359_VIRGE_GX2P || @@ -819,7 +866,7 @@ static int s3fb_set_par(struct fb_info *info) multiplex = 1; } break; - case 4: + case 6: fb_dbg(info, "5/5/5 truecolor\n"); if (par->chip == CHIP_988_VIRGE_VX) { if (info->var.pixclock > 20000) @@ -847,7 +894,7 @@ static int s3fb_set_par(struct fb_info *info) hmul = 2; } break; - case 5: + case 7: fb_dbg(info, "5/6/5 truecolor\n"); if (par->chip == CHIP_988_VIRGE_VX) { if (info->var.pixclock > 20000) @@ -875,12 +922,12 @@ static int s3fb_set_par(struct fb_info *info) hmul = 2; } break; - case 6: + case 8: /* VIRGE VX case */ fb_dbg(info, "8/8/8 truecolor\n"); svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0); break; - case 7: + case 9: fb_dbg(info, "8/8/8/8 truecolor\n"); svga_wcrt_mask(par->state.vgabase, 0x50, 0x30, 0x30); svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0); @@ -889,6 +936,7 @@ static int s3fb_set_par(struct fb_info *info) fb_err(info, "unsupported mode - bug\n"); return -EINVAL; } + vga_w(par->state.vgabase, VGA_PEL_MSK, pel_msk); if (par->chip != CHIP_988_VIRGE_VX) { svga_wseq_mask(par->state.vgabase, 0x15, multiplex ? 0x10 : 0x00, 0x10); @@ -927,33 +975,26 @@ static int s3fb_set_par(struct fb_info *info) static int s3fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *fb) { + struct s3fb_info *par = fb->par; + int cols; + switch (fb->var.bits_per_pixel) { case 0: + case 1: + case 2: case 4: - if (regno >= 16) - return -EINVAL; - - if ((fb->var.bits_per_pixel == 4) && - (fb->var.nonstd == 0)) { - outb(0xF0, VGA_PEL_MSK); - outb(regno*16, VGA_PEL_IW); - } else { - outb(0x0F, VGA_PEL_MSK); - outb(regno, VGA_PEL_IW); - } - outb(red >> 10, VGA_PEL_D); - outb(green >> 10, VGA_PEL_D); - outb(blue >> 10, VGA_PEL_D); - break; case 8: - if (regno >= 256) + cols = 1 << (fb->var.bits_per_pixel ? fb->var.bits_per_pixel : 4); + if (regno >= cols) return -EINVAL; - outb(0xFF, VGA_PEL_MSK); - outb(regno, VGA_PEL_IW); - outb(red >> 10, VGA_PEL_D); - outb(green >> 10, VGA_PEL_D); - outb(blue >> 10, VGA_PEL_D); + if ((fb->var.bits_per_pixel == 4) && ((fb->var.nonstd & 1) == 0)) + regno <<= 4; + + vga_w(par->state.vgabase, VGA_PEL_IW, regno); + vga_w(par->state.vgabase, VGA_PEL_D, red >> 10); + vga_w(par->state.vgabase, VGA_PEL_D, green >> 10); + vga_w(par->state.vgabase, VGA_PEL_D, blue >> 10); break; case 16: if (regno >= 16) @@ -988,34 +1029,30 @@ static int s3fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, static int s3fb_blank(int blank_mode, struct fb_info *info) { struct s3fb_info *par = info->par; + u8 data; + + data = (blank_mode == FB_BLANK_UNBLANK) ? 
@@ -988,34 +1029,30 @@ static int s3fb_blank(int blank_mode, struct fb_info *info)
 {
 	struct s3fb_info *par = info->par;
+	u8 data;
+
+	data = (blank_mode == FB_BLANK_UNBLANK) ?
+			0x00 : 0x20;
+	svga_wseq_mask(par->state.vgabase, 0x01, data, 0x20);
+	svga_wseq_mask(par->state.vgabase, 0x18, data, 0x20);
 
 	switch (blank_mode) {
-	case FB_BLANK_UNBLANK:
-		fb_dbg(info, "unblank\n");
-		svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
-		svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
-		break;
-	case FB_BLANK_NORMAL:
-		fb_dbg(info, "blank\n");
-		svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
-		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+	default:
+		data = 0x00;
 		break;
 	case FB_BLANK_HSYNC_SUSPEND:
-		fb_dbg(info, "hsync\n");
-		svga_wcrt_mask(par->state.vgabase, 0x56, 0x02, 0x06);
-		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+		data = 0x02;
 		break;
 	case FB_BLANK_VSYNC_SUSPEND:
-		fb_dbg(info, "vsync\n");
-		svga_wcrt_mask(par->state.vgabase, 0x56, 0x04, 0x06);
-		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+		data = 0x04;
 		break;
 	case FB_BLANK_POWERDOWN:
-		fb_dbg(info, "sync down\n");
-		svga_wcrt_mask(par->state.vgabase, 0x56, 0x06, 0x06);
-		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+		data = 0x06;
 		break;
 	}
+	svga_wcrt_mask(par->state.vgabase, 0x56, data, 0x06);
+
+	data = (blank_mode == FB_BLANK_POWERDOWN) ? 0x01 : 0x00;
+	svga_wseq_mask(par->state.vgabase, 0x14, data, 0x01);
 
 	return 0;
 }
@@ -1045,6 +1082,33 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
 	return 0;
 }
 
+/* Get capabilities of accelerator based on the mode */
+
+static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
+			  struct fb_var_screeninfo *var)
+{
+	int i;
+
+	if (var->bits_per_pixel == 0) {
+		/* can only support 256 8x16 bitmap */
+		bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+		set_bit(8 - 1, caps->x);
+		bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+		set_bit(16 - 1, caps->y);
+		caps->len = 256;
+	} else {
+		if (var->bits_per_pixel == 4 && (var->nonstd & 1) != 0) {
+			bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+			for (i = 8; i <= FB_MAX_BLIT_WIDTH; i += 8)
+				set_bit(i - 1, caps->x);
+		} else {
+			bitmap_fill(caps->x, FB_MAX_BLIT_WIDTH);
+		}
+		bitmap_fill(caps->y, FB_MAX_BLIT_HEIGHT);
+		caps->len = ~(u32)0;
+	}
+}
+
 /* ------------------------------------------------------------------------- */
 
 /* Frame buffer operations */
@@ -1063,7 +1127,7 @@ static const struct fb_ops s3fb_ops = {
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = s3fb_imageblit,
 	__FB_DEFAULT_IOMEM_OPS_MMAP,
-	.fb_get_caps = svga_get_caps,
+	.fb_get_caps = s3fb_get_caps,
 };
 
 /* ------------------------------------------------------------------------- */
@@ -1445,6 +1509,8 @@ static int __maybe_unused s3_pci_suspend(struct device *dev)
 	}
 
 	fb_set_suspend(info, 1);
+	svga_wseq_mask(par->state.vgabase, 0x18, 0x20, 0x20);
+	svga_wseq_mask(par->state.vgabase, 0x14, 0x01, 0x01);
 
 	mutex_unlock(&(par->open_lock));
 	console_unlock();
@@ -1471,6 +1537,9 @@ static int __maybe_unused s3_pci_resume(struct device *dev)
 		return 0;
 	}
 
+	vga_wseq(par->state.vgabase, 0x08, 0x06);
+	svga_wseq_mask(par->state.vgabase, 0x18, 0x00, 0x20);
+	svga_wseq_mask(par->state.vgabase, 0x14, 0x00, 0x01);
 	s3fb_set_par(info);
 	fb_set_suspend(info, 0);
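Note: the s3fb_blank() rewrite above condenses the old per-case register writes into one mapping: sequencer registers 0x01 and 0x18 (bit 5) blank the screen for every state except FB_BLANK_UNBLANK, CRT register 0x56 (bits 2:1) selects which sync signals stop, and sequencer 0x14 (bit 0) powers the DAC down only for FB_BLANK_POWERDOWN — the same bits the suspend/resume hunks toggle. A compact sketch of the sync mapping, assuming only the FB_BLANK_* levels from <linux/fb.h> (hypothetical helper, not the driver's code):

	static u8 s3_sync_ctl(int blank_mode)
	{
		switch (blank_mode) {
		case FB_BLANK_HSYNC_SUSPEND:	return 0x02;
		case FB_BLANK_VSYNC_SUSPEND:	return 0x04;
		case FB_BLANK_POWERDOWN:	return 0x06;	/* both syncs off */
		default:			return 0x00;	/* unblank/normal */
		}
	}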
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 1893815dc67f..6acf5a00c2ba 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -93,6 +93,7 @@ struct simplefb_par {
 
 static void simplefb_clocks_destroy(struct simplefb_par *par);
 static void simplefb_regulators_destroy(struct simplefb_par *par);
+static void simplefb_detach_genpds(void *res);
 
 /*
  * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
@@ -105,6 +106,7 @@ static void simplefb_destroy(struct fb_info *info)
 
 	simplefb_regulators_destroy(info->par);
 	simplefb_clocks_destroy(info->par);
+	simplefb_detach_genpds(info->par);
 
 	if (info->screen_base)
 		iounmap(info->screen_base);
@@ -445,13 +447,14 @@ static void simplefb_detach_genpds(void *res)
 		if (!IS_ERR_OR_NULL(par->genpds[i]))
 			dev_pm_domain_detach(par->genpds[i], true);
 	}
+	par->num_genpds = 0;
 }
 
 static int simplefb_attach_genpds(struct simplefb_par *par,
 				  struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	unsigned int i;
+	unsigned int i, num_genpds;
 	int err;
 
 	err = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -465,26 +468,35 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
 		return err;
 	}
 
-	par->num_genpds = err;
+	num_genpds = err;
 
 	/*
 	 * Single power-domain devices are handled by the driver core, so
 	 * nothing to do here.
 	 */
-	if (par->num_genpds <= 1)
+	if (num_genpds <= 1) {
+		par->num_genpds = num_genpds;
 		return 0;
+	}
 
-	par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds),
+	par->genpds = devm_kcalloc(dev, num_genpds, sizeof(*par->genpds),
 				   GFP_KERNEL);
 	if (!par->genpds)
 		return -ENOMEM;
 
-	par->genpd_links = devm_kcalloc(dev, par->num_genpds,
+	par->genpd_links = devm_kcalloc(dev, num_genpds,
 					sizeof(*par->genpd_links),
 					GFP_KERNEL);
 	if (!par->genpd_links)
 		return -ENOMEM;
 
+	/*
+	 * Set par->num_genpds only after genpds and genpd_links are allocated
+	 * to exit early from simplefb_detach_genpds() without full
+	 * initialisation.
+	 */
+	par->num_genpds = num_genpds;
+
 	for (i = 0; i < par->num_genpds; i++) {
 		par->genpds[i] = dev_pm_domain_attach_by_id(dev, i);
 		if (IS_ERR(par->genpds[i])) {
@@ -506,9 +518,10 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
 			dev_warn(dev, "failed to link power-domain %u\n", i);
 	}
 
-	return devm_add_action_or_reset(dev, simplefb_detach_genpds, par);
+	return 0;
 }
 #else
+static void simplefb_detach_genpds(void *res) { }
 static int simplefb_attach_genpds(struct simplefb_par *par,
 				  struct platform_device *pdev)
 {
@@ -622,18 +635,20 @@ static int simplefb_probe(struct platform_device *pdev)
 	ret = devm_aperture_acquire_for_platform_device(pdev, par->base, par->size);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to acquire aperture: %d\n", ret);
-		goto error_regulators;
+		goto error_genpds;
 	}
 
 	ret = register_framebuffer(info);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
-		goto error_regulators;
+		goto error_genpds;
 	}
 
 	dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
 
 	return 0;
 
+error_genpds:
+	simplefb_detach_genpds(par);
 error_regulators:
 	simplefb_regulators_destroy(par);
 error_clocks:
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index c90f48ebb15e..d8f3bfb2dd6c 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -390,7 +390,7 @@ static int xenfb_probe(struct xenbus_device *dev,
 
 	info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	info->gfns = vmalloc(array_size(sizeof(unsigned long), info->nr_pages));
+	info->gfns = vmalloc_array(info->nr_pages, sizeof(unsigned long));
 	if (!info->gfns)
 		goto error_nomem;
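Note: the xen-fbfront hunk swaps an open-coded overflow guard for the dedicated helper. vmalloc(array_size(n, size)) saturates the multiplication so an oversized count makes the allocation fail instead of wrapping; vmalloc_array(n, size) expresses the same intent in a single call. A minimal sketch of the pattern, assuming nr_pages holds the page count:

	/* Overflow-safe allocation of one frame number per page. */
	unsigned long *gfns = vmalloc_array(nr_pages, sizeof(*gfns));

	if (!gfns)
		return -ENOMEM;
	/* ... fill and use gfns[0 .. nr_pages - 1] ... */
	vfree(gfns);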