Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/loop.c | 10
-rw-r--r--  drivers/char/tpm/Kconfig | 3
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 2
-rw-r--r--  drivers/char/tpm/tpm.h | 2
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 127
-rw-r--r--  drivers/char/tpm/tpm2-sessions.c | 104
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 89
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 4
-rw-r--r--  drivers/gpio/gpio-usbio.c | 1
-rw-r--r--  drivers/gpio/gpio-wcd934x.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 211
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c | 10
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h | 7
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 2
-rw-r--r--  drivers/gpu/drm/drm_gpusvm.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_fence.c | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 6
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 34
-rw-r--r--  drivers/gpu/drm/xe/xe_configfs.c | 23
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_group.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_late_bind_fw.c | 20
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_query.c | 15
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.h | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_userptr.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 1
-rw-r--r--  drivers/i2c/algos/i2c-algo-pca.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-pca-isa.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-pca-platform.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rtl9300.c | 459
-rw-r--r--  drivers/ntb/hw/amd/ntb_hw_amd.c | 18
-rw-r--r--  drivers/ntb/hw/amd/ntb_hw_amd.h | 1
-rw-r--r--  drivers/ntb/hw/epf/ntb_hw_epf.c | 118
-rw-r--r--  drivers/ntb/ntb_transport.c | 7
-rw-r--r--  drivers/of/irq.c | 2
-rw-r--r--  drivers/of/overlay.c | 2
-rw-r--r--  drivers/of/unittest.c | 1
-rw-r--r--  drivers/video/fbdev/Kconfig | 8
-rw-r--r--  drivers/video/fbdev/core/bitblit.c | 17
-rw-r--r--  drivers/video/fbdev/core/fb_cmdline.c | 2
-rw-r--r--  drivers/video/fbdev/core/fb_fillrect.h | 3
-rw-r--r--  drivers/video/fbdev/core/fbmon.c | 7
-rw-r--r--  drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | 2
-rw-r--r--  drivers/video/fbdev/nvidia/nvidia.c | 3
-rw-r--r--  drivers/video/fbdev/pxafb.c | 3
-rw-r--r--  drivers/video/fbdev/s3fb.c | 177
-rw-r--r--  drivers/video/fbdev/simplefb.c | 31
-rw-r--r--  drivers/video/fbdev/xen-fbfront.c | 2
79 files changed, 1113 insertions(+), 769 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 053a086d547e..13ce229d450c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -551,8 +551,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -822,7 +824,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
if (worker)
goto queue_work;
- worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
@@ -993,8 +995,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
is_loop = is_loop_device(file);
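
Both loop.c hunks fix the same reference leak: the file taken with fget() was not released on the early-error return when loop_check_backing_file() failed. A minimal kernel-style sketch of the corrected pattern (the helper name is hypothetical and this fragment is not a standalone program):

static int loop_take_backing_file(struct loop_device *lo, int fd)
{
	struct file *file = fget(fd);	/* takes a reference on success */
	int error;

	if (!file)
		return -EBADF;

	error = loop_check_backing_file(file);
	if (error) {
		fput(file);	/* drop the fget() reference on every early exit */
		return error;
	}

	/* ... continue reconfiguring the device with 'file' ... */
	return 0;
}
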
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index ba3924eb13ba..8a8f692b6088 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,10 +29,11 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default X86_64
+ default n
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
help
Setting this causes us to deploy a scheme which uses request
and response HMACs in addition to encryption for
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index b71725827743..c9f173001d0e 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(suspend_pcr,
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
{
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_calc_ordinal_duration(chip, ordinal);
+ return tpm2_calc_ordinal_duration(ordinal);
else
return tpm1_calc_ordinal_duration(chip, ordinal);
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 7bb87fa5f7a1..2726bd38e5ac 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -299,7 +299,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 524d802ede26..7d77f6fbc152 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -28,120 +28,57 @@ static struct tpm2_hash tpm2_hash_map[] = {
int tpm2_get_timeouts(struct tpm_chip *chip)
{
- /* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
-
- /* PTP spec timeouts */
- chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
- chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
- chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
-
- /* Key creation commands long timeouts */
- chip->duration[TPM_LONG_LONG] =
- msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
-
chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
-
return 0;
}
-/**
- * tpm2_ordinal_duration_index() - returns an index to the chip duration table
- * @ordinal: TPM command ordinal.
- *
- * The function returns an index to the chip duration table
- * (enum tpm_duration), that describes the maximum amount of
- * time the chip could take to return the result for a particular ordinal.
- *
- * The values of the MEDIUM, and LONG durations are taken
- * from the PC Client Profile (PTP) specification (750, 2000 msec)
- *
- * LONG_LONG is for commands that generates keys which empirically takes
- * a longer time on some systems.
- *
- * Return:
- * * TPM_MEDIUM
- * * TPM_LONG
- * * TPM_LONG_LONG
- * * TPM_UNDEFINED
+/*
+ * Contains the maximum durations in milliseconds for TPM2 commands.
*/
-static u8 tpm2_ordinal_duration_index(u32 ordinal)
-{
- switch (ordinal) {
- /* Startup */
- case TPM2_CC_STARTUP: /* 144 */
- return TPM_MEDIUM;
-
- case TPM2_CC_SELF_TEST: /* 143 */
- return TPM_LONG;
-
- case TPM2_CC_GET_RANDOM: /* 17B */
- return TPM_LONG;
-
- case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
- return TPM_MEDIUM;
- case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
- return TPM_MEDIUM;
- case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
- return TPM_MEDIUM;
- case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
- return TPM_MEDIUM;
-
- case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG_LONG;
-
- case TPM2_CC_PCR_EXTEND: /* 182 */
- return TPM_MEDIUM;
-
- case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
- return TPM_LONG;
- case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
- return TPM_LONG;
-
- case TPM2_CC_GET_CAPABILITY: /* 17A */
- return TPM_MEDIUM;
-
- case TPM2_CC_NV_READ: /* 14E */
- return TPM_LONG;
-
- case TPM2_CC_CREATE_PRIMARY: /* 131 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE: /* 153 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE_LOADED: /* 191 */
- return TPM_LONG_LONG;
-
- default:
- return TPM_UNDEFINED;
- }
-}
+static const struct {
+ unsigned long ordinal;
+ unsigned long duration;
+} tpm2_ordinal_duration_map[] = {
+ {TPM2_CC_STARTUP, 750},
+ {TPM2_CC_SELF_TEST, 3000},
+ {TPM2_CC_GET_RANDOM, 2000},
+ {TPM2_CC_SEQUENCE_UPDATE, 750},
+ {TPM2_CC_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_HASH_SEQUENCE_START, 750},
+ {TPM2_CC_VERIFY_SIGNATURE, 30000},
+ {TPM2_CC_PCR_EXTEND, 750},
+ {TPM2_CC_HIERARCHY_CONTROL, 2000},
+ {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000},
+ {TPM2_CC_GET_CAPABILITY, 750},
+ {TPM2_CC_NV_READ, 2000},
+ {TPM2_CC_CREATE_PRIMARY, 30000},
+ {TPM2_CC_CREATE, 30000},
+ {TPM2_CC_CREATE_LOADED, 30000},
+};
/**
- * tpm2_calc_ordinal_duration() - calculate the maximum command duration
- * @chip: TPM chip to use.
+ * tpm2_calc_ordinal_duration() - Calculate the maximum command duration
* @ordinal: TPM command ordinal.
*
- * The function returns the maximum amount of time the chip could take
- * to return the result for a particular ordinal in jiffies.
- *
- * Return: A maximal duration time for an ordinal in jiffies.
+ * Returns the maximum amount of time the kernel expects the chip to
+ * take, in jiffies.
*/
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal)
{
- unsigned int index;
+ int i;
- index = tpm2_ordinal_duration_index(ordinal);
+ for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++)
+ if (ordinal == tpm2_ordinal_duration_map[i].ordinal)
+ return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration);
- if (index != TPM_UNDEFINED)
- return chip->duration[index];
- else
- return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
}
-
struct tpm2_pcr_read_out {
__be32 update_cnt;
__be32 pcr_selects_cnt;
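
The tpm2-cmd.c change collapses the two-step lookup (ordinal -> enum tpm_duration -> chip->duration[]) into a single ordinal -> milliseconds table with a default fallback. A small, compilable userspace sketch of the same lookup logic, using ordinal values and durations taken from the patch; the 120-second default is an assumption for illustration:

#include <stdio.h>
#include <stddef.h>

#define TPM2_DURATION_DEFAULT_MS 120000UL	/* assumed fallback value */

static const struct {
	unsigned long ordinal;
	unsigned long duration_ms;
} duration_map[] = {
	{ 0x144, 750 },		/* TPM2_CC_STARTUP */
	{ 0x17B, 2000 },	/* TPM2_CC_GET_RANDOM */
	{ 0x131, 30000 },	/* TPM2_CC_CREATE_PRIMARY */
};

static unsigned long ordinal_duration_ms(unsigned long ordinal)
{
	size_t i;

	for (i = 0; i < sizeof(duration_map) / sizeof(duration_map[0]); i++)
		if (ordinal == duration_map[i].ordinal)
			return duration_map[i].duration_ms;
	return TPM2_DURATION_DEFAULT_MS;	/* unknown ordinals use the default */
}

int main(void)
{
	printf("0x17B -> %lu ms\n", ordinal_duration_ms(0x17B));	/* 2000 */
	printf("0x999 -> %lu ms\n", ordinal_duration_ms(0x999));	/* 120000 */
	return 0;
}
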
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index bdb119453dfb..6d03c224e6b2 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -69,8 +69,8 @@
#include <linux/unaligned.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
@@ -385,51 +385,6 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
u32 *handle, u8 *name);
/*
- * It turns out the crypto hmac(sha256) is hard for us to consume
- * because it assumes a fixed key and the TPM seems to change the key
- * on every operation, so we weld the hmac init and final functions in
- * here to give it the same usage characteristics as a regular hash
- */
-static void tpm2_hmac_init(struct sha256_ctx *sctx, u8 *key, u32 key_len)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- sha256_init(sctx);
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_IPAD_VALUE;
- }
- sha256_update(sctx, pad, sizeof(pad));
-}
-
-static void tpm2_hmac_final(struct sha256_ctx *sctx, u8 *key, u32 key_len,
- u8 *out)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_OPAD_VALUE;
- }
-
- /* collect the final hash; use out as temporary storage */
- sha256_final(sctx, out);
-
- sha256_init(sctx);
- sha256_update(sctx, pad, sizeof(pad));
- sha256_update(sctx, out, SHA256_DIGEST_SIZE);
- sha256_final(sctx, out);
-}
-
-/*
* assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
* otherwise standard tpm2_KDFa. Note output is in bytes not bits.
*/
@@ -440,16 +395,16 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
const __be32 bits = cpu_to_be32(bytes * 8);
while (bytes > 0) {
- struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
__be32 c = cpu_to_be32(counter);
- tpm2_hmac_init(&sctx, key, key_len);
- sha256_update(&sctx, (u8 *)&c, sizeof(c));
- sha256_update(&sctx, label, strlen(label)+1);
- sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
- tpm2_hmac_final(&sctx, key, key_len, out);
+ hmac_sha256_init_usingrawkey(&hctx, key, key_len);
+ hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c));
+ hmac_sha256_update(&hctx, label, strlen(label) + 1);
+ hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits));
+ hmac_sha256_final(&hctx, out);
bytes -= SHA256_DIGEST_SIZE;
counter++;
@@ -593,6 +548,7 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 attrs;
u8 cphash[SHA256_DIGEST_SIZE];
struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
if (!auth)
return;
@@ -704,14 +660,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
sha256_final(&sctx, cphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, cphash, sizeof(cphash));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, hmac);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, cphash, sizeof(cphash));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
+ hmac_sha256_final(&hctx, hmac);
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
@@ -751,6 +707,7 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
u8 rphash[SHA256_DIGEST_SIZE];
u32 attrs, cc;
struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
u16 tag = be16_to_cpu(head->tag);
int parm_len, len, i, handles;
@@ -820,21 +777,20 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
sha256_final(&sctx, rphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, rphash, sizeof(rphash));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, rphash, sizeof(rphash));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
/* we're done with the rphash, so put our idea of the hmac there */
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, rphash);
- if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
- rc = 0;
- } else {
+ hmac_sha256_final(&hctx, rphash);
+ if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) {
dev_err(&chip->dev, "TPM: HMAC check failed\n");
goto out;
}
+ rc = 0;
/* now do response decryption */
if (auth->attrs & TPM2_SA_ENCRYPT) {
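
The tpm2-sessions.c conversion drops the open-coded ipad/opad welding in favor of the library's one-shot HMAC-SHA256 context, and swaps memcmp() for crypto_memneq() so the digest comparison runs in constant time. A kernel-style sketch of the new flow, using only the calls visible in the hunks above (the helper name and error code are illustrative, and the fragment assumes kernel context):

#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>

static int tpm2_check_mac(const u8 *key, u32 key_len,
			  const u8 *msg, size_t msg_len,
			  const u8 *expected)
{
	struct hmac_sha256_ctx hctx;
	u8 mac[SHA256_DIGEST_SIZE];

	/* the raw-key variant rekeys per call, matching the TPM's usage */
	hmac_sha256_init_usingrawkey(&hctx, key, key_len);
	hmac_sha256_update(&hctx, msg, msg_len);
	hmac_sha256_final(&hctx, mac);

	/* constant-time inequality test, unlike memcmp() */
	return crypto_memneq(mac, expected, SHA256_DIGEST_SIZE) ? -EBADMSG : 0;
}
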
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index d53fce1c9d6f..c9793a3d986d 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+static const char * const tpm_ppi_info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by system firmware",
+ "User required",
+ "User not required",
+};
+
+/* A mutex to protect access to the cache from concurrent reads */
+static DEFINE_MUTEX(tpm_ppi_lock);
+
+static u32 ppi_operations_cache[PPI_VS_REQ_END + 1];
+static bool ppi_cache_populated;
+
static bool tpm_ppi_req_has_parameter(u64 req)
{
return req == 23;
@@ -277,8 +291,7 @@ cleanup:
return status;
}
-static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
- u32 end)
+static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf)
{
int i;
u32 ret;
@@ -286,34 +299,22 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
union acpi_object *obj, tmp;
union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
- static char *info[] = {
- "Not implemented",
- "BIOS only",
- "Blocked for OS by BIOS",
- "User required",
- "User not required",
- };
-
if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
tmp.integer.type = ACPI_TYPE_INTEGER;
- for (i = start; i <= end; i++) {
+ for (i = 0; i <= PPI_VS_REQ_END; i++) {
tmp.integer.value = i;
obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
ACPI_TYPE_INTEGER, &argv,
TPM_PPI_REVISION_ID_1);
- if (!obj) {
+ if (!obj)
return -ENOMEM;
- } else {
- ret = obj->integer.value;
- ACPI_FREE(obj);
- }
- if (ret > 0 && ret < ARRAY_SIZE(info))
- len += sysfs_emit_at(buf, len, "%d %d: %s\n",
- i, ret, info[ret]);
+ ret = obj->integer.value;
+ ppi_operations_cache[i] = ret;
+ ACPI_FREE(obj);
}
return len;
@@ -324,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
- return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
- PPI_TPM_REQ_MAX);
+ ppi_cache_populated = true;
+ }
+
+ for (i = 0; i <= PPI_TPM_REQ_MAX; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
@@ -334,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
- return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
- PPI_VS_REQ_END);
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
+
+ for (i = PPI_VS_REQ_START; i <= PPI_VS_REQ_END; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
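
The tpm_ppi.c rework queries the firmware once, caches every operation state, and lets each sysfs show handler format its own slice of the cache under the mutex. A compilable userspace analogue of the populate-once pattern (names and sizes are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NENTRIES 128

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cache[NENTRIES];
static bool cache_populated;

/* stands in for the expensive ACPI _DSM walk in cache_ppi_operations() */
static int populate_cache(void)
{
	for (unsigned int i = 0; i < NENTRIES; i++)
		cache[i] = i % 5;
	return 0;
}

static int read_entry(unsigned int idx, unsigned int *out)
{
	int ret = 0;

	pthread_mutex_lock(&cache_lock);
	if (!cache_populated) {
		ret = populate_cache();		/* slow path runs at most once */
		if (ret == 0)
			cache_populated = true;
	}
	if (ret == 0)
		*out = cache[idx];
	pthread_mutex_unlock(&cache_lock);
	return ret;
}

int main(void)
{
	unsigned int v;

	if (read_entry(7, &v) == 0)
		printf("entry 7 = %u\n", v);
	return 0;
}
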
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 4b12c4b9da8b..8954a8660ffc 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -978,8 +978,8 @@ restore_irqs:
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
+ tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
rc = -1;
}
diff --git a/drivers/gpio/gpio-usbio.c b/drivers/gpio/gpio-usbio.c
index e13c120824e3..34d42c743d5b 100644
--- a/drivers/gpio/gpio-usbio.c
+++ b/drivers/gpio/gpio-usbio.c
@@ -29,6 +29,7 @@ static const struct acpi_device_id usbio_gpio_acpi_hids[] = {
{ "INTC1007" }, /* MTL */
{ "INTC10B2" }, /* ARL */
{ "INTC10B5" }, /* LNL */
+ { "INTC10D1" }, /* MTL-CVF */
{ "INTC10E2" }, /* PTL */
{ }
};
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 4af504c23e6f..572b85e77370 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -103,7 +103,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
chip->label = dev_name(dev);
- chip->can_sleep = false;
+ chip->can_sleep = true;
return devm_gpiochip_add_data(dev, chip, data);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7c54fe6b0f5d..83020963dfde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2586,12 +2586,17 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* from the KFD, trigger a segmentation fault in VM debug mode.
*/
if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ struct kfd_process *p;
+
pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
pid_nr(process_info->pid), mem->va);
// Send GPU VM fault to user space
- kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
- mem->va);
+ p = kfd_lookup_process_by_pid(process_info->pid);
+ if (p) {
+ kfd_signal_vm_fault_event_with_userptr(p, mem->va);
+ kfd_unref_process(p);
+ }
}
ret = 0;
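
kfd_lookup_process_by_pid() can return NULL and, on success, hands back a counted reference, so the fix both guards the call and drops the reference afterwards. The essential shape as a kernel-style fragment mirroring the hunk (pid and gpu_addr are placeholders):

struct kfd_process *p;

p = kfd_lookup_process_by_pid(pid);	/* NULL if no such process; takes a ref */
if (p) {
	kfd_signal_vm_fault_event_with_userptr(p, gpu_addr);
	kfd_unref_process(p);		/* balance the lookup's reference */
}
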
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a77000c2e0bb..7a899fb4de29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -6389,23 +6389,28 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
- if (tmp_adev->asic_reset_res)
- r = tmp_adev->asic_reset_res;
-
- tmp_adev->asic_reset_res = 0;
-
- if (r) {
+ if (tmp_adev->asic_reset_res) {
/* bad news, how to tell it to userspace ?
* for ras error, we should report GPU bad status instead of
* reset failure
*/
if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
!amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
- atomic_read(&tmp_adev->gpu_reset_counter));
- amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ dev_info(
+ tmp_adev->dev,
+ "GPU reset(%d) failed with error %d \n",
+ atomic_read(
+ &tmp_adev->gpu_reset_counter),
+ tmp_adev->asic_reset_res);
+ amdgpu_vf_error_put(tmp_adev,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
+ tmp_adev->asic_reset_res);
+ if (!r)
+ r = tmp_adev->asic_reset_res;
+ tmp_adev->asic_reset_res = 0;
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
if (amdgpu_acpi_smart_shift_update(tmp_adev,
AMDGPU_SS_DEV_D0))
dev_warn(tmp_adev->dev,
@@ -7157,28 +7162,35 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
{
- struct pci_dev *parent = pci_upstream_bridge(adev->pdev);
+ struct pci_dev *swus, *swds;
int r;
- if (!parent || parent->vendor != PCI_VENDOR_ID_ATI)
+ swds = pci_upstream_bridge(adev->pdev);
+ if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
+ pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ swus = pci_upstream_bridge(swds);
+ if (!swus ||
+ (swus->vendor != PCI_VENDOR_ID_ATI &&
+ swus->vendor != PCI_VENDOR_ID_AMD) ||
+ pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
return;
/* If already saved, return */
if (adev->pcie_reset_ctx.swus)
return;
/* Upstream bridge is ATI, assume it's SWUS/DS architecture */
- r = pci_save_state(parent);
+ r = pci_save_state(swds);
if (r)
return;
- adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(parent);
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
- parent = pci_upstream_bridge(parent);
- r = pci_save_state(parent);
+ r = pci_save_state(swus);
if (r)
return;
- adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(parent);
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
- adev->pcie_reset_ctx.swus = parent;
+ adev->pcie_reset_ctx.swus = swus;
}
static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a09ccf7d8aa2..ebe2b4c68b0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1102,6 +1102,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_read;
+
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
@@ -1171,6 +1174,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_write;
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8676400834fc..a9327472c651 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1421,14 +1421,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_debugfs_vm_init(file_priv);
- r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
if (r)
goto error_pasid;
- r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
- if (r)
- goto error_vm;
-
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
r = -ENOMEM;
@@ -1468,10 +1464,8 @@ error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
- if (pasid) {
+ if (pasid)
amdgpu_pasid_free(pasid);
- amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
- }
kfree(fpriv);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 1578e4e2bf84..8c0e5d03de50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2352,7 +2352,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
- if (!ret) {
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 48e0932f5b62..1add21160d21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -726,12 +726,12 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
struct amdgpu_bo *bo;
int ret;
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va,
base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
bo = bo_va->base.bo;
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
@@ -748,9 +748,9 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
if (ret)
return ret;
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8c28e8923f02..c1a801203949 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -139,48 +139,6 @@ static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
- *
- * @adev: amdgpu_device pointer
- * @vm: amdgpu_vm pointer
- * @pasid: the pasid the VM is using on this GPU
- *
- * Set the pasid this VM is using on this GPU, can also be used to remove the
- * pasid by passing in zero.
- *
- */
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid)
-{
- int r;
-
- amdgpu_vm_assert_locked(vm);
-
- if (vm->pasid == pasid)
- return 0;
-
- if (vm->pasid) {
- r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
- if (r < 0)
- return r;
-
- vm->pasid = 0;
- }
-
- if (pasid) {
- r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
- GFP_KERNEL));
- if (r < 0)
- return r;
-
- vm->pasid = pasid;
- }
-
-
- return 0;
-}
-
-/**
* amdgpu_vm_bo_evicted - vm_bo is evicted
*
* @vm_bo: vm_bo which is evicted
@@ -195,10 +153,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
vm_bo->moved = true;
amdgpu_vm_assert_locked(vm);
+ spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -211,7 +171,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -225,7 +187,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
+ spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -239,9 +203,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -254,9 +218,10 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
- amdgpu_vm_assert_locked(vm_bo->vm);
vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -270,10 +235,13 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- if (vm_bo->bo->parent)
+ if (vm_bo->bo->parent) {
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- else
+ spin_unlock(&vm_bo->vm->status_lock);
+ } else {
amdgpu_vm_bo_idle(vm_bo);
+ }
}
/**
@@ -287,7 +255,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -301,13 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
- spin_lock(&vm->invalidated_lock);
+ amdgpu_vm_assert_locked(vm);
+
+ spin_lock(&vm->status_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
- spin_unlock(&vm->invalidated_lock);
- amdgpu_vm_assert_locked(vm_bo->vm);
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -317,13 +287,14 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
else if (bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
+ spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_update_shared - helper to update shared memory stat
* @base: base structure for tracking BO usage in a VM
*
- * Takes the vm stats_lock and updates the shared memory stat. If the basic
+ * Takes the vm status_lock and updates the shared memory stat. If the basic
* stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called
* as well.
*/
@@ -336,7 +307,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
bool shared;
dma_resv_assert_held(bo->tbo.base.resv);
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
base->shared = shared;
@@ -348,7 +319,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
vm->stats[bo_memtype].drm.private += size;
}
}
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
/**
@@ -373,11 +344,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
- * Caller need to have the vm stats_lock held. Useful for when multiple update
+ * Caller needs to hold the vm status_lock. Useful for when multiple updates
* need to happen at the same time.
*/
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
- struct ttm_resource *res, int sign)
+ struct ttm_resource *res, int sign)
{
struct amdgpu_vm *vm = base->vm;
struct amdgpu_bo *bo = base->bo;
@@ -401,8 +372,7 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
*/
if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
vm->stats[res_memtype].drm.purgeable += size;
- if (!(bo->preferred_domains &
- amdgpu_mem_type_to_domain(res_memtype)))
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
vm->stats[bo_memtype].evicted += size;
}
}
@@ -421,9 +391,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
{
struct amdgpu_vm *vm = base->vm;
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
amdgpu_vm_update_stats_locked(base, res, sign);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
/**
@@ -449,10 +419,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -511,10 +481,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
int ret;
/* We can only trust prev->next while holding the lock */
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
while (!list_is_head(prev->next, &vm->done)) {
bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
bo = bo_va->base.bo;
if (bo) {
@@ -522,10 +492,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
if (unlikely(ret))
return ret;
}
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
prev = prev->next;
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -621,7 +591,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
- struct amdgpu_vm_bo_base *bo_base, *tmp;
+ struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *bo;
int r;
@@ -634,7 +604,13 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
bo = bo_base->bo;
r = validate(param, bo);
@@ -647,21 +623,26 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
+ spin_lock(&vm->status_lock);
}
+ while (ticket && !list_empty(&vm->evicted_user)) {
+ bo_base = list_first_entry(&vm->evicted_user,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo;
+ dma_resv_assert_held(bo->tbo.base.resv);
- if (ticket) {
- list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user,
- vm_status) {
- bo = bo_base->bo;
- dma_resv_assert_held(bo->tbo.base.resv);
+ r = validate(param, bo);
+ if (r)
+ return r;
- r = validate(param, bo);
- if (r)
- return r;
+ amdgpu_vm_bo_invalidated(bo_base);
- amdgpu_vm_bo_invalidated(bo_base);
- }
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -690,7 +671,9 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
+ spin_lock(&vm->status_lock);
ret &= list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
@@ -981,13 +964,18 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_vm_bo_base *entry, *tmp;
+ struct amdgpu_vm_bo_base *entry;
bool flush_tlb_needed = false;
+ LIST_HEAD(relocated);
int r, idx;
amdgpu_vm_assert_locked(vm);
- if (list_empty(&vm->relocated))
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->relocated, &relocated);
+ spin_unlock(&vm->status_lock);
+
+ if (list_empty(&relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -1003,7 +991,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- list_for_each_entry(entry, &vm->relocated, vm_status) {
+ list_for_each_entry(entry, &relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -1019,7 +1007,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) {
+ while (!list_empty(&relocated)) {
+ entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
+ vm_status);
amdgpu_vm_bo_idle(entry);
}
@@ -1246,9 +1236,9 @@ error_free:
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
/**
@@ -1615,24 +1605,29 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket)
{
- struct amdgpu_bo_va *bo_va, *tmp;
+ struct amdgpu_bo_va *bo_va;
struct dma_resv *resv;
bool clear, unlock;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
/* Per VM BOs never need to be cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
+ spin_lock(&vm->status_lock);
}
- spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) {
@@ -1664,9 +1659,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -2195,9 +2190,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2305,10 +2300,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
amdgpu_vm_bo_invalidate(bo, evicted);
@@ -2554,6 +2549,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: requested vm
* @xcp_id: GPU partition selection id
+ * @pasid: the pasid the VM is using on this GPU
*
* Init @vm fields.
*
@@ -2561,7 +2557,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int32_t xcp_id)
+ int32_t xcp_id, uint32_t pasid)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@@ -2575,12 +2571,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
- spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->invalidated);
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
INIT_KFIFO(vm->faults);
- spin_lock_init(&vm->stats_lock);
r = amdgpu_vm_init_entities(adev, vm);
if (r)
@@ -2638,12 +2633,26 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
dev_dbg(adev->dev, "Failed to create task info for VM\n");
+ /* Store new PASID in XArray (if non-zero) */
+ if (pasid != 0) {
+ r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
+
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
return 0;
error_free_root:
+ /* If PASID was partially set, erase it from XArray before failing */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2749,7 +2758,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_set_pasid(adev, vm, 0);
+ /* Remove PASID mapping before destroying VM */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
@@ -3038,6 +3051,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
amdgpu_vm_assert_locked(vm);
+ spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -3075,13 +3089,11 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->invalidated_lock);
total_invalidated_objs = id;
id = 0;
@@ -3091,6 +3103,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
+ spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
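
Throughout amdgpu_vm.c the patch replaces list_for_each_entry_safe() walks with a drain loop, because status_lock is a spinlock while the per-entry work can sleep: the lock is held only long enough to peek at the head, dropped for the update (which moves the entry to another status list), and retaken before the next peek. The shape of the pattern, condensed from the handle-moved hunk above:

spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
	bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
				 base.vm_status);
	spin_unlock(&vm->status_lock);	/* drop the spinlock for sleeping work */

	r = amdgpu_vm_bo_update(adev, bo_va, false);	/* moves bo_va off vm->moved */
	if (r)
		return r;

	spin_lock(&vm->status_lock);	/* retake before peeking at the head again */
}
spin_unlock(&vm->status_lock);
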
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index adc5c9161fa8..cf0ec94e8a07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -203,11 +203,11 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by vm reservation and invalidated_lock */
+ /* protected by vm status_lock */
struct list_head vm_status;
/* if the bo is counted as shared in mem stats
- * protected by vm BO being reserved */
+ * protected by vm status_lock */
bool shared;
/* protected by the BO being reserved */
@@ -343,8 +343,10 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
- /* Memory statistics for this vm, protected by stats_lock */
- spinlock_t stats_lock;
+ /* Lock to protect vm_bo add/del/move on all lists of vm */
+ spinlock_t status_lock;
+
+ /* Memory statistics for this vm, protected by status_lock */
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
/*
@@ -352,8 +354,6 @@ struct amdgpu_vm {
* PDs, PTs or per VM BOs. The state transits are:
*
* evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
- *
- * Lists are protected by the root PD dma_resv lock.
*/
/* Per-VM and PT BOs who needs a validation */
@@ -374,10 +374,7 @@ struct amdgpu_vm {
* state transits are:
*
* evicted_user or invalidated -> done
- *
- * Lists are protected by the invalidated_lock.
*/
- spinlock_t invalidated_lock;
/* BOs for user mode queues that need a validation */
struct list_head evicted_user;
@@ -503,11 +500,8 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid);
-
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 7a4c12ff9b18..f794fb1cc06e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -543,7 +543,9 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
+ spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
+ spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
@@ -587,6 +589,7 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_pt_cursor seek;
struct amdgpu_vm_bo_base *entry;
+ spin_lock(&params->vm->status_lock);
for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
if (entry && entry->bo)
list_move(&entry->vm_status, &params->tlb_flush_waitlist);
@@ -594,6 +597,7 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
/* enter start node now */
list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
+ spin_unlock(&params->vm->status_lock);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index 404cc8c2ff2c..f4a19357ccbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -337,7 +337,7 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
int vmid, i;
if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
- (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) {
+ (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
struct mes_inv_tlbs_pasid_input input = {0};
input.pasid = pasid;
input.flush_type = flush_type;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 273f42e3afdd..9d72411c3379 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3045,6 +3045,8 @@ retry_write_locked:
if (svms->checkpoint_ts[gpuidx] != 0) {
if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ if (write_locked)
+ mmap_write_downgrade(mm);
r = -EAGAIN;
goto out_unlock_svms;
} else {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8e1622bf7a42..0d03e324d5b9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2000,6 +2000,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.disable_ips_in_vpb = 0;
+ /* DCN35 and above support dynamic DTBCLK switching */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ init_data.flags.allow_0_dtb_clk = true;
+
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 2b1673d69ea8..1ab5ae9b5ea5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(
REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
if (data->taps.h_taps + data->taps.v_taps <= 2) {
- /* Set bypass */
-
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ /* Disable scaler functionality */
+ REG_WRITE(SCL_SCALER_ENABLE, 0);
+ /* Clear registers that can cause glitches even when the scaler is off */
+ REG_WRITE(SCL_TAP_CONTROL, 0);
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
return false;
}
@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(
SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ REG_WRITE(SCL_SCALER_ENABLE, 1);
/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(
REG_SET(DC_LB_MEM_SIZE, 0,
DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+ REG_WRITE(SCL_UPDATE, 0x00010000);
+
/* Clear SCL_F_SHARP_CONTROL value to 0 */
REG_WRITE(SCL_F_SHARP_CONTROL, 0);
@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(
if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
/* 4. Program vertical filters */
if (xfm_dce->filter_v == NULL)
- REG_SET(SCL_VERT_FILTER_CONTROL, 0,
- SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.v_taps,
@@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
/* 5. Program horizontal filters */
if (xfm_dce->filter_h == NULL)
- REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
- SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.h_taps,
@@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(
/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
+
+ REG_WRITE(SCL_UPDATE, 0);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index cbce194ec7b8..eb716e8337e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -155,6 +155,9 @@
SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
SRI(VIEWPORT_START, SCL, id), \
SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_SCALER_ENABLE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \
SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_INIT, SCL, id), \
@@ -590,6 +593,7 @@ struct dce_transform_registers {
uint32_t SCL_VERT_FILTER_SCALE_RATIO;
uint32_t SCL_HORZ_FILTER_INIT;
#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t SCL_SCALER_ENABLE;
uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 17a21bcbde17..1a28061bb9ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -808,6 +808,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
+ dc_assert_fp_enabled();
+
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
@@ -815,6 +817,8 @@ int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb)
{
+ dc_assert_fp_enabled();
+
/* Roughly calculate required crb to hide latency. In practice there is slightly
* more buffer available for latency hiding
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index c9dd920744c9..817a370e80a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -445,6 +445,8 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
validate_mode);
@@ -498,9 +500,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
@@ -581,6 +581,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
unsigned int i, plane_count = 0;
DC_LOGGER_INIT(dc->ctx->logger);
+ dc_assert_fp_enabled();
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 8cda18ce1a76..77023b619f1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -478,6 +478,8 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
validate_mode);
@@ -531,9 +533,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 53c67ebe779f..b75be6ad64f6 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -404,13 +404,13 @@ static const struct dc_plane_cap plane_cap = {
},
.max_upscale_factor = {
- .argb8888 = 16000,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
- .argb8888 = 250,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 07552445e424..fff57f23f4f7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1760,6 +1760,20 @@ enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_stat
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1770,7 +1784,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.validate_bandwidth = dcn35_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index cb0478a9a34d..0abd163b425e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1732,6 +1732,21 @@ static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn351_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+	return ret;
+}
+
static struct resource_funcs dcn351_res_pool_funcs = {
.destroy = dcn351_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1742,7 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.validate_bandwidth = dcn351_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index 126090c9bb8a..ca125ee6c2fb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1734,6 +1734,20 @@ static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
static struct resource_funcs dcn36_res_pool_funcs = {
.destroy = dcn36_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1744,7 +1758,7 @@ static struct resource_funcs dcn36_res_pool_funcs = {
.validate_bandwidth = dcn35_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 55b929ca7982..b1fb0f8a253a 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -641,16 +641,16 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
/* this gives the direction of the cositing (negative will move
* left, right otherwise)
*/
- int sign = 1;
+ int h_sign = flip_horz_scan_dir ? -1 : 1;
+ int v_sign = flip_vert_scan_dir ? -1 : 1;
switch (spl_in->basic_in.cositing) {
-
case CHROMA_COSITING_TOPLEFT:
- init_adj_h = spl_fixpt_from_fraction(sign, 4);
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
+ init_adj_v = spl_fixpt_from_fraction(v_sign, 4);
break;
case CHROMA_COSITING_LEFT:
- init_adj_h = spl_fixpt_from_fraction(sign, 4);
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
init_adj_v = spl_fixpt_zero;
break;
case CHROMA_COSITING_NONE:
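
A worked instance of the change, assuming flip_horz_scan_dir is set
(illustrative):

	int h_sign = flip_horz_scan_dir ? -1 : 1;	/* -1 in this case */
	/* TOPLEFT cositing now adjusts by -1/4 rather than the old fixed +1/4: */
	init_adj_h = spl_fixpt_from_fraction(h_sign, 4);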
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
index 9de01ae574c0..067eddd9c62d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -4115,6 +4115,7 @@
#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
#define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL0_SCL_SCALER_ENABLE 0x1B42
#define mmSCL0_SCL_CONTROL 0x1B44
#define mmSCL0_SCL_DEBUG 0x1B6A
#define mmSCL0_SCL_DEBUG2 0x1B69
@@ -4144,6 +4145,7 @@
#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
#define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
#define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
+#define mmSCL1_SCL_SCALER_ENABLE 0x1E42
#define mmSCL1_SCL_CONTROL 0x1E44
#define mmSCL1_SCL_DEBUG 0x1E6A
#define mmSCL1_SCL_DEBUG2 0x1E69
@@ -4173,6 +4175,7 @@
#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
#define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
#define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
+#define mmSCL2_SCL_SCALER_ENABLE 0x4142
#define mmSCL2_SCL_CONTROL 0x4144
#define mmSCL2_SCL_DEBUG 0x416A
#define mmSCL2_SCL_DEBUG2 0x4169
@@ -4202,6 +4205,7 @@
#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
#define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
#define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
+#define mmSCL3_SCL_SCALER_ENABLE 0x4442
#define mmSCL3_SCL_CONTROL 0x4444
#define mmSCL3_SCL_DEBUG 0x446A
#define mmSCL3_SCL_DEBUG2 0x4469
@@ -4231,6 +4235,7 @@
#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
#define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
#define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
+#define mmSCL4_SCL_SCALER_ENABLE 0x4742
#define mmSCL4_SCL_CONTROL 0x4744
#define mmSCL4_SCL_DEBUG 0x476A
#define mmSCL4_SCL_DEBUG2 0x4769
@@ -4260,6 +4265,7 @@
#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
#define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
#define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
+#define mmSCL5_SCL_SCALER_ENABLE 0x4A42
#define mmSCL5_SCL_CONTROL 0x4A44
#define mmSCL5_SCL_DEBUG 0x4A6A
#define mmSCL5_SCL_DEBUG2 0x4A69
@@ -4287,6 +4293,7 @@
#define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL_COEF_RAM_SELECT 0x1B40
#define mmSCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL_SCALER_ENABLE 0x1B42
#define mmSCL_CONTROL 0x1B44
#define mmSCL_DEBUG 0x1B6A
#define mmSCL_DEBUG2 0x1B69
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index 2d6a598a6c25..9317a7afa621 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -8650,6 +8650,8 @@
#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 1a1f2a6b2e52..a89075e25717 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -288,7 +288,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_driver_if_version) {
+ if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
+ if_version != smu->smc_driver_if_version) {
dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index cbe5b06438c1..285cf7979693 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -450,8 +450,7 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
- if (((pgm == 0) && (fw_ver >= 0x00558200)) ||
- ((pgm == 4) && (fw_ver >= 0x04557100)))
+ if ((pgm == 0) && (fw_ver >= 0x00558200))
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
@@ -3933,7 +3932,7 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
- smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
+ smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_set_smu_mailbox_registers(smu);
smu_v13_0_6_set_temp_funcs(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index d588f74b98de..0ae91c8b6d72 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,6 +40,8 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF
+
#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
do { \
typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
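
A minimal sketch of how the new sentinel is consumed by the check in
smu_v13_0_check_fw_version() above (abridged restatement, not a new code path):

	/* An ASIC opting out of strict IF-version matching: */
	smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;

	/* ...which now short-circuits the mismatch warning: */
	if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
	    if_version != smu->smc_driver_if_version)
		dev_info(adev->dev, "smu if version mismatch\n");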
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index eeeeb99cfdf6..cb906765897e 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -361,7 +361,6 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
* @name: Name of the GPU SVM.
* @drm: Pointer to the DRM device structure.
* @mm: Pointer to the mm_struct for the address space.
- * @device_private_page_owner: Device private pages owner.
* @mm_start: Start address of GPU SVM.
* @mm_range: Range of the GPU SVM.
* @notifier_size: Size of individual notifiers.
@@ -383,7 +382,7 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
*/
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
const char *name, struct drm_device *drm,
- struct mm_struct *mm, void *device_private_page_owner,
+ struct mm_struct *mm,
unsigned long mm_start, unsigned long mm_range,
unsigned long notifier_size,
const struct drm_gpusvm_ops *ops,
@@ -395,15 +394,13 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
mmgrab(mm);
} else {
/* No full SVM mode, only core drm_gpusvm_pages API. */
- if (ops || num_chunks || mm_range || notifier_size ||
- device_private_page_owner)
+ if (ops || num_chunks || mm_range || notifier_size)
return -EINVAL;
}
gpusvm->name = name;
gpusvm->drm = drm;
gpusvm->mm = mm;
- gpusvm->device_private_page_owner = device_private_page_owner;
gpusvm->mm_start = mm_start;
gpusvm->mm_range = mm_range;
gpusvm->notifier_size = notifier_size;
@@ -684,6 +681,7 @@ static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn,
* @notifier: Pointer to the GPU SVM notifier structure
* @start: Start address
* @end: End address
+ * @dev_private_owner: The device private page owner
*
* Check if pages between start and end have been faulted in on the CPU. Use to
* prevent migration of pages without CPU backing store.
@@ -692,14 +690,15 @@ static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn,
*/
static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ void *dev_private_owner)
{
struct hmm_range hmm_range = {
.default_flags = 0,
.notifier = &notifier->notifier,
.start = start,
.end = end,
- .dev_private_owner = gpusvm->device_private_page_owner,
+ .dev_private_owner = dev_private_owner,
};
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -753,6 +752,7 @@ err_free:
* @gpuva_start: Start address of GPUVA which mirrors CPU
* @gpuva_end: End address of GPUVA which mirrors CPU
* @check_pages_threshold: Check CPU pages for present threshold
+ * @dev_private_owner: The device private page owner
*
* This function determines the chunk size for the GPU SVM range based on the
* fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
@@ -767,7 +767,8 @@ drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
unsigned long fault_addr,
unsigned long gpuva_start,
unsigned long gpuva_end,
- unsigned long check_pages_threshold)
+ unsigned long check_pages_threshold,
+ void *dev_private_owner)
{
unsigned long start, end;
int i = 0;
@@ -814,7 +815,7 @@ retry:
* process-many-malloc' mallocs at least 64k at a time.
*/
if (end - start <= check_pages_threshold &&
- !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) {
+ !drm_gpusvm_check_pages(gpusvm, notifier, start, end, dev_private_owner)) {
++i;
goto retry;
}
@@ -957,7 +958,8 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
fault_addr, gpuva_start,
gpuva_end,
- ctx->check_pages_threshold);
+ ctx->check_pages_threshold,
+ ctx->device_private_page_owner);
if (chunk_size == LONG_MAX) {
err = -EINVAL;
goto err_notifier_remove;
@@ -1268,7 +1270,7 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
.notifier = notifier,
.start = pages_start,
.end = pages_end,
- .dev_private_owner = gpusvm->device_private_page_owner,
+ .dev_private_owner = ctx->device_private_page_owner,
};
void *zdd;
unsigned long timeout =
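
With the owner removed from drm_gpusvm_init(), callers now pass it per
operation through struct drm_gpusvm_ctx. A hedged sketch of the reworked usage
(owner pointer and size arguments hypothetical):

	struct drm_gpusvm_ctx ctx = {
		.device_private_page_owner = my_owner,	/* hypothetical */
	};
	int err;

	err = drm_gpusvm_init(&gpusvm, "example", drm, current->mm,
			      0, mm_range, notifier_size, &ops,
			      chunk_sizes, num_chunks);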
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b96f0555ca14..f26562eafffc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -929,7 +929,7 @@ done:
nvif_vmm_put(vmm, &old_mem->vma[1]);
nvif_vmm_put(vmm, &old_mem->vma[0]);
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 0317f3d7452a..1884686985b8 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -62,6 +62,8 @@ struct v3d_queue_state {
/* Currently active job for this queue */
struct v3d_job *active_job;
spinlock_t queue_lock;
+ /* Protect dma fence for signalling job completion */
+ spinlock_t fence_lock;
};
/* Performance monitor object. The perform lifetime is controlled by userspace
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 8f8471adae34..c82500a1df73 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -15,7 +15,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q)
fence->dev = &v3d->drm;
fence->queue = q;
fence->seqno = ++queue->emit_seqno;
- dma_fence_init(&fence->base, &v3d_fence_ops, &queue->queue_lock,
+ dma_fence_init(&fence->base, &v3d_fence_ops, &queue->fence_lock,
queue->fence_context, fence->seqno);
return &fence->base;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index c77d90aa9b82..bb110d35f749 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -273,6 +273,7 @@ v3d_gem_init(struct drm_device *dev)
seqcount_init(&queue->stats.lock);
spin_lock_init(&queue->queue_lock);
+ spin_lock_init(&queue->fence_lock);
}
spin_lock_init(&v3d->mm_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 819704ac675d..d539f25b5fbe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1497,6 +1497,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_bo *vmw_bo = NULL;
+ struct vmw_resource *res;
struct vmw_surface *srf = NULL;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
@@ -1532,18 +1533,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
VMW_RES_DIRTY_SET : 0;
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- dirty, user_surface_converter,
- &cmd->body.host.sid, NULL);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty,
+ user_surface_converter, &cmd->body.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
VMW_DEBUG_USER("could not find surface for DMA.\n");
return ret;
}
- srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
+ res = sw_context->res_cache[vmw_res_surface].res;
+ if (!res) {
+ VMW_DEBUG_USER("Invalid DMA surface.\n");
+ return -EINVAL;
+ }
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
+ srf = vmw_res_to_srf(res);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo,
+ header);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 7ee93e7191c7..35dc94c3db39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -308,8 +308,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
}
node->res = vmw_resource_reference_unless_doomed(res);
- if (!node->res)
+ if (!node->res) {
+ hash_del_rcu(&node->hash.head);
return -ESRCH;
+ }
node->first_usage = 1;
if (!res->dev_priv->has_mob) {
@@ -636,7 +638,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
hash_del_rcu(&val->hash.head);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
- hash_del_rcu(&entry->hash.head);
+ hash_del_rcu(&val->hash.head);
ctx->sw_context = NULL;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 49b37dfd4e58..69e2840c7ef0 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -211,15 +211,15 @@ static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
* param generator can be used for both
*/
static const struct xe_ip pre_gmdid_graphics_ips[] = {
- graphics_ip_xelp,
- graphics_ip_xelpp,
- graphics_ip_xehpg,
- graphics_ip_xehpc,
+ { 1200, "Xe_LP", &graphics_xelp },
+ { 1210, "Xe_LP+", &graphics_xelp },
+ { 1255, "Xe_HPG", &graphics_xehpg },
+ { 1260, "Xe_HPC", &graphics_xehpc },
};
static const struct xe_ip pre_gmdid_media_ips[] = {
- media_ip_xem,
- media_ip_xehpm,
+ { 1200, "Xe_M", &media_xem },
+ { 1255, "Xe_HPM", &media_xem },
};
KUNIT_ARRAY_PARAM(pre_gmdid_graphics_ip, pre_gmdid_graphics_ips, xe_ip_kunit_desc);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 8422f3cab113..4410e28dee54 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1737,6 +1737,24 @@ static bool should_migrate_to_smem(struct xe_bo *bo)
bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
}
+static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx)
+{
+ long lerr;
+
+ if (ctx->no_wait_gpu)
+ return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ?
+ 0 : -EBUSY;
+
+ lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ ctx->interruptible, MAX_SCHEDULE_TIMEOUT);
+ if (lerr < 0)
+ return lerr;
+ if (lerr == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
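An illustrative call of the new helper, mirroring the fault paths below
(assumes the bo's reservation lock is already held):

	struct ttm_operation_ctx tctx = {
		.interruptible = true,
		.no_wait_gpu = false,	/* true turns the wait into a 0/-EBUSY poll */
	};
	int err = xe_bo_wait_usage_kernel(bo, &tctx);
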
/* Populate the bo if swapped out, or migrate if the access mode requires that. */
static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
struct drm_exec *exec)
@@ -1745,10 +1763,9 @@ static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
int err = 0;
if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
- xe_assert(xe_bo_device(bo),
- dma_resv_test_signaled(tbo->base.resv, DMA_RESV_USAGE_KERNEL) ||
- (tbo->ttm && ttm_tt_is_populated(tbo->ttm)));
- err = ttm_bo_populate(&bo->ttm, ctx);
+ err = xe_bo_wait_usage_kernel(bo, ctx);
+ if (!err)
+ err = ttm_bo_populate(&bo->ttm, ctx);
} else if (should_migrate_to_smem(bo)) {
xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
@@ -1922,7 +1939,6 @@ static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
.no_wait_gpu = false,
.gfp_retry_mayfail = retry_after_wait,
};
- long lerr;
err = drm_exec_lock_obj(&exec, &tbo->base);
drm_exec_retry_on_contention(&exec);
@@ -1942,13 +1958,9 @@ static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
break;
}
- lerr = dma_resv_wait_timeout(tbo->base.resv,
- DMA_RESV_USAGE_KERNEL, true,
- MAX_SCHEDULE_TIMEOUT);
- if (lerr < 0) {
- err = lerr;
+ err = xe_bo_wait_usage_kernel(bo, &tctx);
+ if (err)
break;
- }
if (!retry_after_wait)
ret = __xe_bo_cpu_fault(vmf, xe, bo);
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index 8a9b950e7a6d..139663423185 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -126,8 +126,20 @@
* not intended for normal execution and will taint the kernel with TAINT_TEST
* when used.
*
- * Currently this is implemented only for post and mid context restore.
- * Examples:
+ * The syntax allows passing straight instructions to be executed by the engine
+ * in a batch buffer, or setting specific registers.
+ *
+ * #. Generic instruction::
+ *
+ * <engine-class> cmd <instr> [[dword0] [dword1] [...]]
+ *
+ * #. Simple register setting::
+ *
+ * <engine-class> reg <address> <value>
+ *
+ * Commands are saved per engine class: all instances of that class will execute
+ * those commands during a context switch. The instruction, dword arguments,
+ * addresses and values are in hex format, as in the examples below.
*
* #. Execute a LRI command to write 0xDEADBEEF to register 0x4f10 after the
* normal context restore::
@@ -154,7 +166,8 @@
* When using multiple lines, make sure to use a command that is
* implemented with a single write syscall, like HEREDOC.
*
- * These attributes can only be set before binding to the device.
+ * Currently this is implemented only for post and mid context restore, and
+ * these attributes can only be set before binding to the device.
*
* Remove devices
* ==============
@@ -324,8 +337,8 @@ static const struct engine_info *lookup_engine_info(const char *pattern, u64 *ma
continue;
pattern += strlen(engine_info[i].cls);
- if (!mask && !*pattern)
- return &engine_info[i];
+ if (!mask)
+ return *pattern ? NULL : &engine_info[i];
if (!strcmp(pattern, "*")) {
*mask = engine_info[i].mask;
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index fdb7b7498920..2883b39c9b37 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -685,16 +685,16 @@ static int wait_for_lmem_ready(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
-static void sriov_update_device_info(struct xe_device *xe)
+static void vf_update_device_info(struct xe_device *xe)
{
+ xe_assert(xe, IS_SRIOV_VF(xe));
/* disable features that are not available/applicable to VFs */
- if (IS_SRIOV_VF(xe)) {
- xe->info.probe_display = 0;
- xe->info.has_heci_cscfi = 0;
- xe->info.has_heci_gscfi = 0;
- xe->info.skip_guc_pc = 1;
- xe->info.skip_pcode = 1;
- }
+ xe->info.probe_display = 0;
+ xe->info.has_heci_cscfi = 0;
+ xe->info.has_heci_gscfi = 0;
+ xe->info.has_late_bind = 0;
+ xe->info.skip_guc_pc = 1;
+ xe->info.skip_pcode = 1;
}
static int xe_device_vram_alloc(struct xe_device *xe)
@@ -735,7 +735,8 @@ int xe_device_probe_early(struct xe_device *xe)
xe_sriov_probe_early(xe);
- sriov_update_device_info(xe);
+ if (IS_SRIOV_VF(xe))
+ vf_update_device_info(xe);
err = xe_pcode_probe_early(xe);
if (err || xe_survivability_mode_is_requested(xe)) {
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 58bee3ffe881..fa4db5f23342 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -213,17 +213,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
err = q->ops->suspend_wait(q);
if (err)
- goto err_suspend;
+ return err;
}
if (need_resume)
xe_hw_engine_group_resume_faulting_lr_jobs(group);
return 0;
-
-err_suspend:
- up_write(&group->mode_sem);
- return err;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.c b/drivers/gpu/drm/xe/xe_late_bind_fw.c
index 38f3feb2aecd..768442ca7da6 100644
--- a/drivers/gpu/drm/xe/xe_late_bind_fw.c
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw.c
@@ -60,7 +60,7 @@ static int parse_cpd_header(struct xe_late_bind_fw *lb_fw,
const struct gsc_manifest_header *manifest;
const struct gsc_cpd_entry *entry;
size_t min_size = sizeof(*header);
- u32 offset;
+ u32 offset = 0;
int i;
/* manifest_entry is mandatory */
@@ -116,7 +116,7 @@ static int parse_lb_layout(struct xe_late_bind_fw *lb_fw,
const struct csc_fpt_header *header = data;
const struct csc_fpt_entry *entry;
size_t min_size = sizeof(*header);
- u32 offset;
+ u32 offset = 0;
int i;
/* fpt_entry is mandatory */
@@ -184,17 +184,13 @@ static const char *xe_late_bind_parse_status(uint32_t status)
}
}
-static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind)
+static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind, u32 *num_fans)
{
struct xe_device *xe = late_bind_to_xe(late_bind);
struct xe_tile *root_tile = xe_device_get_root_tile(xe);
- u32 uval;
- if (!xe_pcode_read(root_tile,
- PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), &uval, NULL))
- return uval;
- else
- return 0;
+ return xe_pcode_read(root_tile,
+ PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), num_fans, NULL);
}
void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind)
@@ -314,7 +310,11 @@ static int __xe_late_bind_fw_init(struct xe_late_bind *late_bind, u32 fw_id)
lb_fw->flags &= ~INTEL_LB_FLAG_IS_PERSISTENT;
if (lb_fw->type == INTEL_LB_TYPE_FAN_CONTROL) {
- num_fans = xe_late_bind_fw_num_fans(late_bind);
+ ret = xe_late_bind_fw_num_fans(late_bind, &num_fans);
+ if (ret) {
+ drm_dbg(&xe->drm, "Failed to read number of fans: %d\n", ret);
+ return 0; /* Not a fatal error, continue without fan control */
+ }
drm_dbg(&xe->drm, "Number of Fans: %d\n", num_fans);
if (!num_fans)
return 0;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index d6625c71115b..2c5a44377994 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -201,7 +201,7 @@ int xe_pm_resume(struct xe_device *xe)
if (err)
goto err;
- xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+ xe_i2c_pm_resume(xe, true);
xe_irq_resume(xe);
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index e1b603aba61b..2e9ff33ed2fe 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -276,8 +276,7 @@ static int query_mem_regions(struct xe_device *xe,
mem_regions->mem_regions[0].instance = 0;
mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
- if (perfmon_capable())
- mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+ mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
mem_regions->num_mem_regions = 1;
for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
@@ -293,13 +292,11 @@ static int query_mem_regions(struct xe_device *xe,
mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
man->size;
- if (perfmon_capable()) {
- xe_ttm_vram_get_used(man,
- &mem_regions->mem_regions
- [mem_regions->num_mem_regions].used,
- &mem_regions->mem_regions
- [mem_regions->num_mem_regions].cpu_visible_used);
- }
+ xe_ttm_vram_get_used(man,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].used,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].cpu_visible_used);
mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
xe_ttm_vram_get_cpu_visible_size(man);
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 7f2f1f041f1d..7e2db71ff34e 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -67,11 +67,6 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
range_debug(range, operation);
}
-static void *xe_svm_devm_owner(struct xe_device *xe)
-{
- return xe;
-}
-
static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
{
@@ -744,15 +739,14 @@ int xe_svm_init(struct xe_vm *vm)
xe_svm_garbage_collector_work_func);
err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
- current->mm, xe_svm_devm_owner(vm->xe), 0,
- vm->size,
+ current->mm, 0, vm->size,
xe_modparam.svm_notifier_size * SZ_1M,
&gpusvm_ops, fault_chunk_sizes,
ARRAY_SIZE(fault_chunk_sizes));
drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
} else {
err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
- &vm->xe->drm, NULL, NULL, 0, 0, 0, NULL,
+ &vm->xe->drm, NULL, 0, 0, 0, NULL,
NULL, 0);
}
@@ -1017,6 +1011,7 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
.devmem_only = need_vram && devmem_possible,
.timeslice_ms = need_vram && devmem_possible ?
vm->xe->atomic_svm_timeslice_ms : 0,
+ .device_private_page_owner = xe_svm_devm_owner(vm->xe),
};
struct xe_validation_ctx vctx;
struct drm_exec exec;
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index cef6ee7d6fe3..0955d2ac8d74 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,6 +6,20 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
+struct xe_device;
+
+/**
+ * xe_svm_devm_owner() - Return the owner of device private memory
+ * @xe: The xe device.
+ *
+ * Return: The owner of this device's device private memory to use in
+ * hmm_range_fault().
+ */
+static inline void *xe_svm_devm_owner(struct xe_device *xe)
+{
+ return xe;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
#include <drm/drm_pagemap.h>
diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c
index 91d09af71ced..f16e92cd8090 100644
--- a/drivers/gpu/drm/xe/xe_userptr.c
+++ b/drivers/gpu/drm/xe/xe_userptr.c
@@ -54,6 +54,7 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
struct xe_device *xe = vm->xe;
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
+ .device_private_page_owner = NULL,
};
lockdep_assert_held(&vm->lock);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 0cacab20ff85..027e6ce648c5 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2881,6 +2881,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+ ctx.device_private_page_owner = xe_svm_devm_owner(vm->xe);
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 74b66aec33d4..ee86df4cff4b 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -30,7 +30,7 @@ static int i2c_debug;
#define pca_clock(adap) adap->i2c_clock
#define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val)
#define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON)
-#define pca_wait(adap) adap->wait_for_completion(adap->data)
+#define pca_wait(adap) adap->wait_for_completion_cb(adap->data)
static void pca_reset(struct i2c_algo_pca_data *adap)
{
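
The member is renamed so it no longer shadows wait_for_completion() from
<linux/completion.h>. A sketch of a bus driver wiring the callback under its
new name (helper name hypothetical; the pca-isa and pca-platform hunks below
do the same):

	static struct i2c_algo_pca_data pca_data = {
		.wait_for_completion_cb = my_waitforcompletion,
	};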
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index dee40704825c..aefdbee1f03c 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -868,7 +868,7 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
return 0;
}
-static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
+static void mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
{
unsigned int clk_src;
unsigned int step_cnt;
@@ -938,9 +938,6 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
break;
}
-
-
- return 0;
}
static void i2c_dump_register(struct mtk_i2c *i2c)
@@ -1460,11 +1457,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
- ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
- if (ret) {
- dev_err(&pdev->dev, "Failed to set the speed.\n");
- return -EINVAL;
- }
+ mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
if (i2c->dev_comp->max_dma_support > 32) {
ret = dma_set_mask(&pdev->dev,
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 85e8cf58e8bf..0cbf2f509527 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -95,7 +95,7 @@ static struct i2c_algo_pca_data pca_isa_data = {
/* .data intentionally left NULL, not needed with ISA */
.write_byte = pca_isa_writebyte,
.read_byte = pca_isa_readbyte,
- .wait_for_completion = pca_isa_waitforcompletion,
+ .wait_for_completion_cb = pca_isa_waitforcompletion,
.reset_chip = pca_isa_resetchip,
};
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 87da8241b927..c0f35ebbe37d 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -180,7 +180,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
}
i2c->algo_data.data = i2c;
- i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion;
+ i2c->algo_data.wait_for_completion_cb = i2c_pca_pf_waitforcompletion;
if (i2c->gpio)
i2c->algo_data.reset_chip = i2c_pca_pf_resetchip;
else
diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c
index af991b28e4f8..4723e48cfe18 100644
--- a/drivers/i2c/busses/i2c-rtl9300.c
+++ b/drivers/i2c/busses/i2c-rtl9300.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/unaligned.h>
enum rtl9300_bus_freq {
RTL9300_I2C_STD_FREQ,
@@ -20,103 +21,143 @@ struct rtl9300_i2c_chan {
struct i2c_adapter adap;
struct rtl9300_i2c *i2c;
enum rtl9300_bus_freq bus_freq;
- u8 sda_pin;
+ u8 sda_num;
+};
+
+enum rtl9300_i2c_reg_scope {
+ REG_SCOPE_GLOBAL,
+ REG_SCOPE_MASTER,
+};
+
+struct rtl9300_i2c_reg_field {
+ struct reg_field field;
+ enum rtl9300_i2c_reg_scope scope;
+};
+
+enum rtl9300_i2c_reg_fields {
+ F_DATA_WIDTH = 0,
+ F_DEV_ADDR,
+ F_I2C_FAIL,
+ F_I2C_TRIG,
+ F_MEM_ADDR,
+ F_MEM_ADDR_WIDTH,
+ F_RD_MODE,
+ F_RWOP,
+ F_SCL_FREQ,
+ F_SCL_SEL,
+ F_SDA_OUT_SEL,
+ F_SDA_SEL,
+
+ /* keep last */
+ F_NUM_FIELDS
+};
+
+struct rtl9300_i2c_drv_data {
+ struct rtl9300_i2c_reg_field field_desc[F_NUM_FIELDS];
+ int (*select_scl)(struct rtl9300_i2c *i2c, u8 scl);
+ u32 data_reg;
+ u8 max_nchan;
};
#define RTL9300_I2C_MUX_NCHAN 8
+#define RTL9310_I2C_MUX_NCHAN 12
struct rtl9300_i2c {
struct regmap *regmap;
struct device *dev;
- struct rtl9300_i2c_chan chans[RTL9300_I2C_MUX_NCHAN];
+ struct rtl9300_i2c_chan chans[RTL9310_I2C_MUX_NCHAN];
+ struct regmap_field *fields[F_NUM_FIELDS];
u32 reg_base;
- u8 sda_pin;
+ u32 data_reg;
+ u8 scl_num;
+ u8 sda_num;
struct mutex lock;
};
+DEFINE_GUARD(rtl9300_i2c, struct rtl9300_i2c *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock))
+
+enum rtl9300_i2c_xfer_type {
+ RTL9300_I2C_XFER_BYTE,
+ RTL9300_I2C_XFER_WORD,
+ RTL9300_I2C_XFER_BLOCK,
+};
+
+struct rtl9300_i2c_xfer {
+ enum rtl9300_i2c_xfer_type type;
+ u16 dev_addr;
+ u8 reg_addr;
+ u8 reg_addr_len;
+ u8 *data;
+ u8 data_len;
+ bool write;
+};
+
#define RTL9300_I2C_MST_CTRL1 0x0
-#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS 8
-#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK GENMASK(31, 8)
-#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS 4
-#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK GENMASK(6, 4)
-#define RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL BIT(3)
-#define RTL9300_I2C_MST_CTRL1_RWOP BIT(2)
-#define RTL9300_I2C_MST_CTRL1_I2C_FAIL BIT(1)
-#define RTL9300_I2C_MST_CTRL1_I2C_TRIG BIT(0)
#define RTL9300_I2C_MST_CTRL2 0x4
-#define RTL9300_I2C_MST_CTRL2_RD_MODE BIT(15)
-#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS 8
-#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK GENMASK(14, 8)
-#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS 4
-#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK GENMASK(7, 4)
-#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS 2
-#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK GENMASK(3, 2)
-#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS 0
-#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK GENMASK(1, 0)
#define RTL9300_I2C_MST_DATA_WORD0 0x8
#define RTL9300_I2C_MST_DATA_WORD1 0xc
#define RTL9300_I2C_MST_DATA_WORD2 0x10
#define RTL9300_I2C_MST_DATA_WORD3 0x14
-
#define RTL9300_I2C_MST_GLB_CTRL 0x384
+#define RTL9310_I2C_MST_IF_CTRL 0x1004
+#define RTL9310_I2C_MST_IF_SEL 0x1008
+#define RTL9310_I2C_MST_CTRL 0x0
+#define RTL9310_I2C_MST_MEMADDR_CTRL 0x4
+#define RTL9310_I2C_MST_DATA_CTRL 0x8
+
static int rtl9300_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len)
{
- u32 val, mask;
int ret;
- val = len << RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS;
- mask = RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK;
-
- ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val);
+ ret = regmap_field_write(i2c->fields[F_MEM_ADDR_WIDTH], len);
if (ret)
return ret;
- val = reg << RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS;
- mask = RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK;
-
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+ return regmap_field_write(i2c->fields[F_MEM_ADDR], reg);
}
-static int rtl9300_i2c_config_io(struct rtl9300_i2c *i2c, u8 sda_pin)
+static int rtl9300_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl)
{
- int ret;
- u32 val, mask;
-
- ret = regmap_update_bits(i2c->regmap, RTL9300_I2C_MST_GLB_CTRL, BIT(sda_pin), BIT(sda_pin));
- if (ret)
- return ret;
-
- val = (sda_pin << RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS) |
- RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL;
- mask = RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK | RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL;
+ return regmap_field_write(i2c->fields[F_SCL_SEL], 1);
+}
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+static int rtl9310_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl)
+{
+ return regmap_field_update_bits(i2c->fields[F_SCL_SEL], BIT(scl), BIT(scl));
}
-static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan,
- u16 addr, u16 len)
+static int rtl9300_i2c_config_chan(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan)
{
- u32 val, mask;
+	const struct rtl9300_i2c_drv_data *drv_data;
+ int ret;
- if (len < 1 || len > 16)
- return -EINVAL;
+ if (i2c->sda_num == chan->sda_num)
+ return 0;
- val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS;
- mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK;
+ ret = regmap_field_write(i2c->fields[F_SCL_FREQ], chan->bus_freq);
+ if (ret)
+ return ret;
- val |= addr << RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS;
- mask |= RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK;
+	drv_data = device_get_match_data(i2c->dev);
+ ret = drv_data->select_scl(i2c, i2c->scl_num);
+ if (ret)
+ return ret;
- val |= ((len - 1) & 0xf) << RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS;
- mask |= RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK;
+ ret = regmap_field_update_bits(i2c->fields[F_SDA_SEL], BIT(chan->sda_num),
+ BIT(chan->sda_num));
+ if (ret)
+ return ret;
- mask |= RTL9300_I2C_MST_CTRL2_RD_MODE;
+ ret = regmap_field_write(i2c->fields[F_SDA_OUT_SEL], chan->sda_num);
+ if (ret)
+ return ret;
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val);
+ i2c->sda_num = chan->sda_num;
+ return 0;
}
-static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
+static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, u8 len)
{
u32 vals[4] = {};
int i, ret;
@@ -124,8 +165,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
if (len > 16)
return -EIO;
- ret = regmap_bulk_read(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0,
- vals, ARRAY_SIZE(vals));
+ ret = regmap_bulk_read(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals));
if (ret)
return ret;
@@ -137,7 +177,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
return 0;
}
-static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len)
+static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, u8 len)
{
u32 vals[4] = {};
int i;
@@ -152,56 +192,94 @@ static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len)
vals[reg] |= buf[i] << shift;
}
- return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0,
- vals, ARRAY_SIZE(vals));
+ return regmap_bulk_write(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals));
}
static int rtl9300_i2c_writel(struct rtl9300_i2c *i2c, u32 data)
{
- return regmap_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, data);
+ return regmap_write(i2c->regmap, i2c->data_reg, data);
}
-static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write,
- int size, union i2c_smbus_data *data, int len)
+static int rtl9300_i2c_prepare_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer)
{
- u32 val, mask;
int ret;
- val = read_write == I2C_SMBUS_WRITE ? RTL9300_I2C_MST_CTRL1_RWOP : 0;
- mask = RTL9300_I2C_MST_CTRL1_RWOP;
+ if (xfer->data_len < 1 || xfer->data_len > 16)
+ return -EINVAL;
+
+ ret = regmap_field_write(i2c->fields[F_DEV_ADDR], xfer->dev_addr);
+ if (ret)
+ return ret;
+
+ ret = rtl9300_i2c_reg_addr_set(i2c, xfer->reg_addr, xfer->reg_addr_len);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(i2c->fields[F_RWOP], xfer->write);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(i2c->fields[F_DATA_WIDTH], (xfer->data_len - 1) & 0xf);
+ if (ret)
+ return ret;
- val |= RTL9300_I2C_MST_CTRL1_I2C_TRIG;
- mask |= RTL9300_I2C_MST_CTRL1_I2C_TRIG;
+ if (xfer->write) {
+ switch (xfer->type) {
+ case RTL9300_I2C_XFER_BYTE:
+ ret = rtl9300_i2c_writel(i2c, *xfer->data);
+ break;
+ case RTL9300_I2C_XFER_WORD:
+ ret = rtl9300_i2c_writel(i2c, get_unaligned((const u16 *)xfer->data));
+ break;
+ default:
+ ret = rtl9300_i2c_write(i2c, xfer->data, xfer->data_len);
+ break;
+ }
+ }
- ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+ return ret;
+}
+
+static int rtl9300_i2c_do_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_field_write(i2c->fields[F_I2C_TRIG], 1);
if (ret)
return ret;
- ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1,
- val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 100000);
+ ret = regmap_field_read_poll_timeout(i2c->fields[F_I2C_TRIG], val, !val, 100, 100000);
if (ret)
return ret;
- if (val & RTL9300_I2C_MST_CTRL1_I2C_FAIL)
+ ret = regmap_field_read(i2c->fields[F_I2C_FAIL], &val);
+ if (ret)
+ return ret;
+ if (val)
return -EIO;
- if (read_write == I2C_SMBUS_READ) {
- if (size == I2C_SMBUS_BYTE || size == I2C_SMBUS_BYTE_DATA) {
- ret = regmap_read(i2c->regmap,
- i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val);
+ if (!xfer->write) {
+ switch (xfer->type) {
+ case RTL9300_I2C_XFER_BYTE:
+ ret = regmap_read(i2c->regmap, i2c->data_reg, &val);
if (ret)
return ret;
- data->byte = val & 0xff;
- } else if (size == I2C_SMBUS_WORD_DATA) {
- ret = regmap_read(i2c->regmap,
- i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val);
+
+ *xfer->data = val & 0xff;
+ break;
+ case RTL9300_I2C_XFER_WORD:
+ ret = regmap_read(i2c->regmap, i2c->data_reg, &val);
if (ret)
return ret;
- data->word = val & 0xffff;
- } else {
- ret = rtl9300_i2c_read(i2c, &data->block[0], len);
+
+			put_unaligned(val & 0xffff, (u16 *)xfer->data);
+ break;
+ default:
+ ret = rtl9300_i2c_read(i2c, xfer->data, xfer->data_len);
if (ret)
return ret;
+ break;
}
}
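
For reference, this is how the SMBus path below fills the new descriptor for a
byte-data read (restated from the hunk that follows, not new code):

	struct rtl9300_i2c_xfer xfer = {
		.type = RTL9300_I2C_XFER_BYTE,
		.dev_addr = addr & 0x7f,
		.reg_addr = command,
		.reg_addr_len = 1,
		.data = &data->byte,
		.data_len = 1,
		.write = false,	/* read_write == I2C_SMBUS_READ */
	};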
@@ -214,100 +292,68 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s
{
struct rtl9300_i2c_chan *chan = i2c_get_adapdata(adap);
struct rtl9300_i2c *i2c = chan->i2c;
- int len = 0, ret;
+ struct rtl9300_i2c_xfer xfer = {0};
+ int ret;
- mutex_lock(&i2c->lock);
- if (chan->sda_pin != i2c->sda_pin) {
- ret = rtl9300_i2c_config_io(i2c, chan->sda_pin);
- if (ret)
- goto out_unlock;
- i2c->sda_pin = chan->sda_pin;
- }
+ if (addr > 0x7f)
+ return -EINVAL;
+
+ guard(rtl9300_i2c)(i2c);
+
+ ret = rtl9300_i2c_config_chan(i2c, chan);
+ if (ret)
+ return ret;
+
+ xfer.dev_addr = addr & 0x7f;
+ xfer.write = (read_write == I2C_SMBUS_WRITE);
+ xfer.reg_addr = command;
+ xfer.reg_addr_len = 1;
switch (size) {
case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- } else {
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = (read_write == I2C_SMBUS_READ) ? &data->byte : &command;
+ xfer.data_len = 1;
+ xfer.reg_addr = 0;
+ xfer.reg_addr_len = 0;
+ xfer.type = RTL9300_I2C_XFER_BYTE;
break;
-
case I2C_SMBUS_BYTE_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_writel(i2c, data->byte);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = &data->byte;
+ xfer.data_len = 1;
+ xfer.type = RTL9300_I2C_XFER_BYTE;
break;
-
case I2C_SMBUS_WORD_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 2);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_writel(i2c, data->word);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = (u8 *)&data->word;
+ xfer.data_len = 2;
+ xfer.type = RTL9300_I2C_XFER_WORD;
break;
-
case I2C_SMBUS_BLOCK_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) {
- ret = -EINVAL;
- goto out_unlock;
- }
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0] + 1);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_write(i2c, &data->block[0], data->block[0] + 1);
- if (ret)
- goto out_unlock;
- }
- len = data->block[0] + 1;
+ xfer.data = &data->block[0];
+ xfer.data_len = data->block[0] + 1;
+ xfer.type = RTL9300_I2C_XFER_BLOCK;
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ xfer.data = &data->block[1];
+ xfer.data_len = data->block[0];
+ xfer.type = RTL9300_I2C_XFER_BLOCK;
break;
-
default:
dev_err(&adap->dev, "Unsupported transaction %d\n", size);
- ret = -EOPNOTSUPP;
- goto out_unlock;
+ return -EOPNOTSUPP;
}
- ret = rtl9300_i2c_execute_xfer(i2c, read_write, size, data, len);
-
-out_unlock:
- mutex_unlock(&i2c->lock);
+ ret = rtl9300_i2c_prepare_xfer(i2c, &xfer);
+ if (ret)
+ return ret;
- return ret;
+ return rtl9300_i2c_do_xfer(i2c, &xfer);
}
static u32 rtl9300_i2c_func(struct i2c_adapter *a)
{
return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA;
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
}
static const struct i2c_algorithm rtl9300_i2c_algo = {
@@ -325,9 +371,11 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rtl9300_i2c *i2c;
- u32 clock_freq, sda_pin;
- int ret, i = 0;
struct fwnode_handle *child;
+	const struct rtl9300_i2c_drv_data *drv_data;
+ struct reg_field fields[F_NUM_FIELDS];
+ u32 clock_freq, scl_num, sda_num;
+ int ret, i = 0;
i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -344,16 +392,34 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = device_property_read_u32(dev, "realtek,scl", &scl_num);
+ if (ret || scl_num != 1)
+ scl_num = 0;
+ i2c->scl_num = (u8)scl_num;
+
platform_set_drvdata(pdev, i2c);
- if (device_get_child_node_count(dev) > RTL9300_I2C_MUX_NCHAN)
+	drv_data = device_get_match_data(i2c->dev);
+ if (device_get_child_node_count(dev) > drv_data->max_nchan)
return dev_err_probe(dev, -EINVAL, "Too many channels\n");
+ i2c->data_reg = i2c->reg_base + drv_data->data_reg;
+ for (i = 0; i < F_NUM_FIELDS; i++) {
+ fields[i] = drv_data->field_desc[i].field;
+ if (drv_data->field_desc[i].scope == REG_SCOPE_MASTER)
+ fields[i].reg += i2c->reg_base;
+ }
+ ret = devm_regmap_field_bulk_alloc(dev, i2c->regmap, i2c->fields,
+ fields, F_NUM_FIELDS);
+ if (ret)
+ return ret;
+
+ i = 0;
device_for_each_child_node(dev, child) {
struct rtl9300_i2c_chan *chan = &i2c->chans[i];
struct i2c_adapter *adap = &chan->adap;
- ret = fwnode_property_read_u32(child, "reg", &sda_pin);
+ ret = fwnode_property_read_u32(child, "reg", &sda_num);
if (ret)
return ret;
@@ -365,17 +431,16 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
case I2C_MAX_STANDARD_MODE_FREQ:
chan->bus_freq = RTL9300_I2C_STD_FREQ;
break;
-
case I2C_MAX_FAST_MODE_FREQ:
chan->bus_freq = RTL9300_I2C_FAST_FREQ;
break;
default:
dev_warn(i2c->dev, "SDA%d clock-frequency %d not supported using default\n",
- sda_pin, clock_freq);
+ sda_num, clock_freq);
break;
}
- chan->sda_pin = sda_pin;
+ chan->sda_num = sda_num;
chan->i2c = i2c;
adap = &i2c->chans[i].adap;
adap->owner = THIS_MODULE;
@@ -385,23 +450,77 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
adap->dev.parent = dev;
i2c_set_adapdata(adap, chan);
adap->dev.of_node = to_of_node(child);
- snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_pin);
+ snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_num);
i++;
ret = devm_i2c_add_adapter(dev, adap);
if (ret)
return ret;
}
- i2c->sda_pin = 0xff;
+ i2c->sda_num = 0xff;
+
+ /* only use standard read format */
+ ret = regmap_field_write(i2c->fields[F_RD_MODE], 0);
+ if (ret)
+ return ret;
return 0;
}
+#define GLB_REG_FIELD(reg, lsb, msb) \
+	{ .field = REG_FIELD(reg, lsb, msb), .scope = REG_SCOPE_GLOBAL }
+#define MST_REG_FIELD(reg, lsb, msb) \
+	{ .field = REG_FIELD(reg, lsb, msb), .scope = REG_SCOPE_MASTER }
+
+static const struct rtl9300_i2c_drv_data rtl9300_i2c_drv_data = {
+ .field_desc = {
+ [F_MEM_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 8, 31),
+ [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 4, 6),
+ [F_SCL_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 3, 3),
+ [F_RWOP] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 2, 2),
+ [F_I2C_FAIL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 1, 1),
+ [F_I2C_TRIG] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 0, 0),
+ [F_RD_MODE] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 15, 15),
+ [F_DEV_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 8, 14),
+ [F_DATA_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 4, 7),
+ [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 2, 3),
+ [F_SCL_FREQ] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 0, 1),
+ [F_SDA_SEL] = GLB_REG_FIELD(RTL9300_I2C_MST_GLB_CTRL, 0, 7),
+ },
+ .select_scl = rtl9300_i2c_select_scl,
+ .data_reg = RTL9300_I2C_MST_DATA_WORD0,
+ .max_nchan = RTL9300_I2C_MUX_NCHAN,
+};
+
+static const struct rtl9300_i2c_drv_data rtl9310_i2c_drv_data = {
+ .field_desc = {
+ [F_SCL_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 12, 13),
+ [F_SDA_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 0, 11),
+ [F_SCL_FREQ] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 30, 31),
+ [F_DEV_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 11, 17),
+ [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 18, 21),
+ [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 9, 10),
+ [F_DATA_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 5, 8),
+ [F_RD_MODE] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 4, 4),
+ [F_RWOP] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 2, 2),
+ [F_I2C_FAIL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 1, 1),
+ [F_I2C_TRIG] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 0, 0),
+ [F_MEM_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_MEMADDR_CTRL, 0, 23),
+ },
+ .select_scl = rtl9310_i2c_select_scl,
+ .data_reg = RTL9310_I2C_MST_DATA_CTRL,
+ .max_nchan = RTL9310_I2C_MUX_NCHAN,
+};
+
static const struct of_device_id i2c_rtl9300_dt_ids[] = {
- { .compatible = "realtek,rtl9301-i2c" },
- { .compatible = "realtek,rtl9302b-i2c" },
- { .compatible = "realtek,rtl9302c-i2c" },
- { .compatible = "realtek,rtl9303-i2c" },
+ { .compatible = "realtek,rtl9301-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9302b-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9302c-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9303-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9310-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9311-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9312-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9313-i2c", .data = (void *) &rtl9310_i2c_drv_data },
{}
};
MODULE_DEVICE_TABLE(of, i2c_rtl9300_dt_ids);
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 63ceed89b62e..1a163596ddf5 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -197,13 +197,22 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
{
- struct pci_dev *pdev = NULL;
+ struct pci_dev *pdev = ndev->ntb.pdev;
struct pci_dev *pci_swds = NULL;
struct pci_dev *pci_swus = NULL;
u32 stat;
int rc;
if (ndev->ntb.topo == NTB_TOPO_SEC) {
+ if (ndev->dev_data->is_endpoint) {
+ rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
+ if (rc)
+ return rc;
+
+ ndev->lnk_sta = stat;
+ return 0;
+ }
+
/* Locate the pointer to Downstream Switch for this device */
pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
if (pci_swds) {
@@ -1311,6 +1320,11 @@ static const struct ntb_dev_data dev_data[] = {
.mw_count = 2,
.mw_idx = 2,
},
+ { /* for device 0x17d7 */
+ .mw_count = 2,
+ .mw_idx = 2,
+ .is_endpoint = true,
+ },
};
static const struct pci_device_id amd_ntb_pci_tbl[] = {
@@ -1319,6 +1333,8 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x17d4), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x17d7), (kernel_ulong_t)&dev_data[2] },
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 5f337b1572a0..e8c3165fa38b 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -168,6 +168,7 @@ enum {
struct ntb_dev_data {
const unsigned char mw_count;
const unsigned int mw_idx;
+ const bool is_endpoint;
};
struct amd_ntb_dev;
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index 00f0e78f685b..d3ecf25a5162 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -49,6 +49,7 @@
#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
enum pci_barno {
+ NO_BAR = -1,
BAR_0,
BAR_1,
BAR_2,
@@ -57,16 +58,26 @@ enum pci_barno {
BAR_5,
};
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_PEER_SPAD,
+ BAR_DB,
+ BAR_MW1,
+ BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+ NTB_BAR_NUM,
+};
+
+#define NTB_EPF_MAX_MW_COUNT (NTB_BAR_NUM - BAR_MW1)
+
struct ntb_epf_dev {
struct ntb_dev ntb;
struct device *dev;
/* Mutex to protect providing commands to NTB EPF */
struct mutex cmd_lock;
- enum pci_barno ctrl_reg_bar;
- enum pci_barno peer_spad_reg_bar;
- enum pci_barno db_reg_bar;
- enum pci_barno mw_bar;
+ const enum pci_barno *barno_map;
unsigned int mw_count;
unsigned int spad_count;
@@ -85,17 +96,6 @@ struct ntb_epf_dev {
#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb)
-struct ntb_epf_data {
- /* BAR that contains both control region and self spad region */
- enum pci_barno ctrl_reg_bar;
- /* BAR that contains peer spad region */
- enum pci_barno peer_spad_reg_bar;
- /* BAR that contains Doorbell region and Memory window '1' */
- enum pci_barno db_reg_bar;
- /* BAR that contains memory windows*/
- enum pci_barno mw_bar;
-};
-
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
u32 argument)
{
@@ -144,7 +144,7 @@ static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx)
return -EINVAL;
}
- return idx + 2;
+ return ndev->barno_map[BAR_MW1 + idx];
}
static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
@@ -413,7 +413,9 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EINVAL;
}
- bar = idx + ndev->mw_bar;
+ bar = ntb_epf_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
mw_size = pci_resource_len(ntb->pdev, bar);
@@ -455,7 +457,9 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
if (idx == 0)
offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
- bar = idx + ndev->mw_bar;
+ bar = ntb_epf_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
@@ -560,6 +564,11 @@ static int ntb_epf_init_dev(struct ntb_epf_dev *ndev)
ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT);
ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+ if (ndev->mw_count > NTB_EPF_MAX_MW_COUNT) {
+ dev_err(dev, "Unsupported MW count: %u\n", ndev->mw_count);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -596,14 +605,15 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
- ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
+ ndev->ctrl_reg = pci_iomap(pdev, ndev->barno_map[BAR_CONFIG], 0);
if (!ndev->ctrl_reg) {
ret = -EIO;
goto err_pci_regions;
}
- if (ndev->peer_spad_reg_bar) {
- ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (ndev->barno_map[BAR_PEER_SPAD] != ndev->barno_map[BAR_CONFIG]) {
+ ndev->peer_spad_reg = pci_iomap(pdev,
+ ndev->barno_map[BAR_PEER_SPAD], 0);
if (!ndev->peer_spad_reg) {
ret = -EIO;
goto err_pci_regions;
@@ -614,7 +624,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz;
}
- ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
+ ndev->db_reg = pci_iomap(pdev, ndev->barno_map[BAR_DB], 0);
if (!ndev->db_reg) {
ret = -EIO;
goto err_pci_regions;
@@ -659,12 +669,7 @@ static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev)
static int ntb_epf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- enum pci_barno peer_spad_reg_bar = BAR_1;
- enum pci_barno ctrl_reg_bar = BAR_0;
- enum pci_barno db_reg_bar = BAR_2;
- enum pci_barno mw_bar = BAR_2;
struct device *dev = &pdev->dev;
- struct ntb_epf_data *data;
struct ntb_epf_dev *ndev;
int ret;
@@ -675,18 +680,10 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
if (!ndev)
return -ENOMEM;
- data = (struct ntb_epf_data *)id->driver_data;
- if (data) {
- peer_spad_reg_bar = data->peer_spad_reg_bar;
- ctrl_reg_bar = data->ctrl_reg_bar;
- db_reg_bar = data->db_reg_bar;
- mw_bar = data->mw_bar;
- }
+ ndev->barno_map = (const enum pci_barno *)id->driver_data;
+ if (!ndev->barno_map)
+ return -EINVAL;
- ndev->peer_spad_reg_bar = peer_spad_reg_bar;
- ndev->ctrl_reg_bar = ctrl_reg_bar;
- ndev->db_reg_bar = db_reg_bar;
- ndev->mw_bar = mw_bar;
ndev->dev = dev;
ntb_epf_init_struct(ndev, pdev);
@@ -730,30 +727,51 @@ static void ntb_epf_pci_remove(struct pci_dev *pdev)
ntb_epf_deinit_pci(ndev);
}
-static const struct ntb_epf_data j721e_data = {
- .ctrl_reg_bar = BAR_0,
- .peer_spad_reg_bar = BAR_1,
- .db_reg_bar = BAR_2,
- .mw_bar = BAR_2,
+static const enum pci_barno j721e_map[NTB_BAR_NUM] = {
+ [BAR_CONFIG] = BAR_0,
+ [BAR_PEER_SPAD] = BAR_1,
+ [BAR_DB] = BAR_2,
+ [BAR_MW1] = BAR_2,
+ [BAR_MW2] = BAR_3,
+ [BAR_MW3] = BAR_4,
+ [BAR_MW4] = BAR_5
};
-static const struct ntb_epf_data mx8_data = {
- .ctrl_reg_bar = BAR_0,
- .peer_spad_reg_bar = BAR_0,
- .db_reg_bar = BAR_2,
- .mw_bar = BAR_4,
+static const enum pci_barno mx8_map[NTB_BAR_NUM] = {
+ [BAR_CONFIG] = BAR_0,
+ [BAR_PEER_SPAD] = BAR_0,
+ [BAR_DB] = BAR_2,
+ [BAR_MW1] = BAR_4,
+ [BAR_MW2] = BAR_5,
+ [BAR_MW3] = NO_BAR,
+ [BAR_MW4] = NO_BAR
+};
+
+static const enum pci_barno rcar_map[NTB_BAR_NUM] = {
+ [BAR_CONFIG] = BAR_0,
+ [BAR_PEER_SPAD] = BAR_0,
+ [BAR_DB] = BAR_4,
+ [BAR_MW1] = BAR_2,
+ [BAR_MW2] = NO_BAR,
+ [BAR_MW3] = NO_BAR,
+ [BAR_MW4] = NO_BAR,
};
static const struct pci_device_id ntb_epf_pci_tbl[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
- .driver_data = (kernel_ulong_t)&j721e_data,
+ .driver_data = (kernel_ulong_t)j721e_map,
},
{
PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809),
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
- .driver_data = (kernel_ulong_t)&mx8_data,
+ .driver_data = (kernel_ulong_t)mx8_map,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0030),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)rcar_map,
},
{ },
};
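
A rough illustration of the sentinel convention introduced above (assuming idx has already passed the driver's own range check): because NO_BAR is -1, an unmapped memory window produces a negative BAR number from the table lookup, which the callers shown in this hunk simply propagate as an error:

	int bar = ntb_epf_mw_to_bar(ndev, idx);
	if (bar < 0)	/* -EINVAL for a bad idx, NO_BAR (-1) for an unmapped MW */
		return bar;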
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 4f775c3e218f..eb875e3db2e3 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -59,6 +59,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/mutex.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
@@ -241,6 +242,9 @@ struct ntb_transport_ctx {
struct work_struct link_cleanup;
struct dentry *debugfs_node_dir;
+
+ /* Serialize execution of the link event work items */
+ struct mutex link_event_lock;
};
enum {
@@ -1024,6 +1028,7 @@ static void ntb_transport_link_cleanup_work(struct work_struct *work)
struct ntb_transport_ctx *nt =
container_of(work, struct ntb_transport_ctx, link_cleanup);
+ guard(mutex)(&nt->link_event_lock);
ntb_transport_link_cleanup(nt);
}
@@ -1047,6 +1052,8 @@ static void ntb_transport_link_work(struct work_struct *work)
u32 val;
int rc = 0, i, spad;
+ guard(mutex)(&nt->link_event_lock);
+
/* send the local info, in the opposite order of the way we read it */
if (nt->use_msi) {
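
The guard(mutex)(...) statements above come from the kernel's scope-based cleanup helpers in <linux/cleanup.h>; as a standalone sketch (the function and lock names here are illustrative), the construct is equivalent to a mutex_lock()/mutex_unlock() pair bound to the enclosing scope:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);

	static void demo_serialized_work(void)
	{
		guard(mutex)(&demo_lock);	/* mutex_lock(&demo_lock) */

		/* critical section, demo_lock held throughout */
	}					/* mutex_unlock() on scope exit */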
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 788ccb6ab287..65c3c23255b7 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -163,7 +163,7 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph
* @out_irq: structure of_phandle_args updated by this function
*
* This function is a low-level interrupt tree walking function. It
- * can be used to do a partial walk with synthetized reg and interrupts
+ * can be used to do a partial walk with synthesized reg and interrupts
* properties, for example when resolving PCI interrupts when no device
* node exist for the parent. It takes an interrupt specifier structure as
* input, walks the tree looking for any interrupt-map properties, translates
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 1af6f52d0708..255e8362f600 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -135,7 +135,7 @@ static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain);
* @nb: Notifier block to register
*
* Register for notification on overlay operations on device tree nodes. The
- * reported actions definied by @of_reconfig_change. The notifier callback
+ * reported actions defined by @of_reconfig_change. The notifier callback
* furthermore receives a pointer to the affected device tree node.
*
* Note that a notifier callback is not supposed to store pointers to a device
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e3503ec20f6c..388e9ec2cccf 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -4300,6 +4300,7 @@ static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add)
unittest(!np, "Child device tree node is not removed\n");
child_dev = device_find_any_child(&pdev->dev);
unittest(!child_dev, "Child device is not removed\n");
+ put_device(child_dev);
}
failed:
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index c3b9954df804..a257b739188d 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -126,9 +126,9 @@ config FB_ACORN
config FB_CLPS711X
tristate "CLPS711X LCD support"
depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
+ depends on LCD_CLASS_DEVICE
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
- select LCD_CLASS_DEVICE
select VIDEOMODE_HELPERS
help
Say Y to enable the Framebuffer driver for the Cirrus Logic
@@ -150,7 +150,7 @@ config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
depends on FB && HAVE_CLK && HAS_IOMEM
depends on ARCH_MXC || COMPILE_TEST
- select LCD_CLASS_DEVICE
+ depends on LCD_CLASS_DEVICE
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
@@ -948,9 +948,6 @@ config FB_RADEON
a framebuffer device. There are both PCI and AGP versions. You
don't need to choose this to run the Radeon in plain VGA mode.
- There is a product page at
- https://products.amd.com/en-us/GraphicCardResult.aspx
-
config FB_RADEON_I2C
bool "DDC/I2C for ATI Radeon support"
depends on FB_RADEON
@@ -1060,6 +1057,7 @@ config FB_S3
select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
+ select FB_CFB_REV_PIXELS_IN_BYTE
select FONT_8x16 if FRAMEBUFFER_CONSOLE
help
Driver for graphics boards with S3 Trio / S3 Virge chip.
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index f9475c14f733..a9ec7f488522 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -160,6 +160,11 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
image.height = vc->vc_font.height;
image.depth = 1;
+ if (image.dy >= info->var.yres)
+ return;
+
+ image.height = min(image.height, info->var.yres - image.dy);
+
if (attribute) {
buf = kmalloc(cellsize, GFP_ATOMIC);
if (!buf)
@@ -173,6 +178,18 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
cnt = count;
image.width = vc->vc_font.width * cnt;
+
+ if (image.dx >= info->var.xres)
+ break;
+
+ if (image.dx + image.width > info->var.xres) {
+ image.width = info->var.xres - image.dx;
+ cnt = image.width / vc->vc_font.width;
+ if (cnt == 0)
+ break;
+ image.width = cnt * vc->vc_font.width;
+ }
+
pitch = DIV_ROUND_UP(image.width, 8) + scan_align;
pitch &= ~scan_align;
size = pitch * image.height + buf_align;
diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c
index 4d1634c492ec..594b60424d1c 100644
--- a/drivers/video/fbdev/core/fb_cmdline.c
+++ b/drivers/video/fbdev/core/fb_cmdline.c
@@ -40,7 +40,7 @@ int fb_get_options(const char *name, char **option)
bool enabled;
if (name)
- is_of = strncmp(name, "offb", 4);
+ is_of = !strncmp(name, "offb", 4);
enabled = __video_get_options(name, &options, is_of);
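
The one-character fix above hinges on strncmp()'s return convention: it returns 0 on a match, so only the negated form makes is_of true for "offb" devices. A self-contained illustration (the helper name is made up for this sketch):

	#include <linux/string.h>
	#include <linux/types.h>

	static bool name_is_offb(const char *name)
	{
		/* strncmp() == 0 means the first four bytes match "offb" */
		return !strncmp(name, "offb", 4);
	}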
diff --git a/drivers/video/fbdev/core/fb_fillrect.h b/drivers/video/fbdev/core/fb_fillrect.h
index 66042e534de7..f366670a53af 100644
--- a/drivers/video/fbdev/core/fb_fillrect.h
+++ b/drivers/video/fbdev/core/fb_fillrect.h
@@ -92,8 +92,7 @@ static unsigned long pixel_to_pat(int bpp, u32 color)
pattern = pattern | pattern << bpp;
break;
default:
- pattern = color;
- break;
+ return color;
}
#ifndef __LITTLE_ENDIAN
pattern <<= (BITS_PER_LONG % bpp);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 3b779c27c271..0a65bef01e3c 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -36,6 +36,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
#include "../edid.h"
+#include <linux/string_choices.h>
/*
* EDID parser
@@ -320,9 +321,9 @@ static void get_dpms_capabilities(unsigned char flags,
if (flags & DPMS_STANDBY)
specs->dpms |= FB_DPMS_STANDBY;
DPRINTK(" DPMS: Active %s, Suspend %s, Standby %s\n",
- (flags & DPMS_ACTIVE_OFF) ? "yes" : "no",
- (flags & DPMS_SUSPEND) ? "yes" : "no",
- (flags & DPMS_STANDBY) ? "yes" : "no");
+ str_yes_no(flags & DPMS_ACTIVE_OFF),
+ str_yes_no(flags & DPMS_SUSPEND),
+ str_yes_no(flags & DPMS_STANDBY));
}
static void get_chroma(unsigned char *block, struct fb_monspecs *specs)
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index ade88e7bc760..676c6d3ccc12 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -674,7 +674,7 @@ static int of_platform_mb862xx_probe(struct platform_device *ofdev)
struct fb_info *info;
struct resource res;
resource_size_t res_size;
- unsigned long ret = -ENODEV;
+ int ret = -ENODEV;
if (of_address_to_resource(np, 0, &res)) {
dev_err(dev, "Invalid address\n");
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index cfaf9454014d..72b85f475605 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/backlight.h>
+#include <linux/string_choices.h>
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
@@ -622,7 +623,7 @@ static int nvidiafb_set_par(struct fb_info *info)
else
par->FPDither = !!(NV_RD32(par->PRAMDAC, 0x083C) & 1);
printk(KERN_INFO PFX "Flat panel dithering %s\n",
- par->FPDither ? "enabled" : "disabled");
+ str_enabled_disabled(par->FPDither));
}
info->fix.visual = (info->var.bits_per_pixel == 8) ?
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index baf87f34cc24..b96a8a96bce8 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -60,6 +60,7 @@
#include <linux/soc/pxa/cpu.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
+#include <linux/string_choices.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -1419,7 +1420,7 @@ static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
if (ret < 0)
pr_warn("Unable to %s LCD supply regulator: %d\n",
- on ? "enable" : "disable", ret);
+ str_enable_disable(on), ret);
else
fbi->lcd_supply_enabled = on;
}
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index ff84106ecf1c..ba30e5568cab 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -50,10 +50,14 @@ struct s3fb_info {
static const struct svga_fb_format s3fb_formats[] = {
{ 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 16},
- { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0,
- FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
+ { 1, {0, 1, 0}, {0, 1, 0}, {0, 1, 0}, {0, 0, 0}, 2,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 32, 64},
+ { 2, {0, 2, 0}, {0, 2, 0}, {0, 2, 0}, {0, 0, 0}, 2,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 32},
{ 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 1,
FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
+ { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 2,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
{ 8, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 4, 8},
{16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
@@ -557,7 +561,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
/* 32bpp mode is not supported on VIRGE VX,
24bpp is not supported on others */
- if ((par->chip == CHIP_988_VIRGE_VX) ? (rv == 7) : (rv == 6))
+ if ((par->chip == CHIP_988_VIRGE_VX) ? (rv == 9) : (rv == 8))
rv = -EINVAL;
if (rv < 0) {
@@ -607,7 +611,7 @@ static int s3fb_set_par(struct fb_info *info)
struct s3fb_info *par = info->par;
u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
u32 bpp = info->var.bits_per_pixel;
- u32 htotal, hsstart;
+ u32 htotal, hsstart, pel_msk;
if (bpp != 0) {
info->fix.ypanstep = 1;
@@ -617,9 +621,11 @@ static int s3fb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- if (bpp == 4) {
+ if (bpp == 4 && (info->var.nonstd & 1) != 0) {
+ int i;
bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
- set_bit(8 - 1, info->pixmap.blit_x);
+ for (i = 8; i <= FB_MAX_BLIT_WIDTH; i += 8)
+ set_bit(i - 1, info->pixmap.blit_x);
} else {
bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
}
@@ -730,7 +736,7 @@ static int s3fb_set_par(struct fb_info *info)
vga_wcrt(par->state.vgabase, 0x50, 0x00);
vga_wcrt(par->state.vgabase, 0x67, 0x50);
msleep(10); /* screen remains blank sometimes without this */
- vga_wcrt(par->state.vgabase, 0x63, (mode <= 2) ? 0x90 : 0x09);
+ vga_wcrt(par->state.vgabase, 0x63, (mode <= 4) ? 0x90 : 0x09);
vga_wcrt(par->state.vgabase, 0x66, 0x90);
}
@@ -763,12 +769,17 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x31, 0x00, 0x40);
multiplex = 0;
hmul = 1;
+ pel_msk = 0xff;
+
+ svga_wcrt_mask(par->state.vgabase, 0x08, 0x00, 0x60);
+ svga_wcrt_mask(par->state.vgabase, 0x05, 0x00, 0x60);
/* Set mode-specific register values */
switch (mode) {
case 0:
fb_dbg(info, "text mode\n");
svga_set_textmode_vga_regs(par->state.vgabase);
+ pel_msk = 0x0f;
/* Set additional registers like in 8-bit mode */
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
@@ -783,8 +794,11 @@ static int s3fb_set_par(struct fb_info *info)
}
break;
case 1:
- fb_dbg(info, "4 bit pseudocolor\n");
- vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
+ fb_dbg(info, "1 bit pseudocolor\n");
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x10, 0x14);
+ svga_wcrt_mask(par->state.vgabase, 0x08, 0x60, 0x60);
+ svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60);
+ pel_msk = 0x01;
/* Set additional registers like in 8-bit mode */
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
@@ -794,7 +808,13 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 2:
- fb_dbg(info, "4 bit pseudocolor, planar\n");
+ fb_dbg(info, "2 bit pseudocolor\n");
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x04, 0x14);
+ svga_wseq_mask(par->state.vgabase, 0x04, 0x08, 0x08);
+ vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x08, 0x20, 0x60);
+ svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60);
+ pel_msk = 0x03;
/* Set additional registers like in 8-bit mode */
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
@@ -804,8 +824,35 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 3:
+ fb_dbg(info, "4 bit pseudocolor, planar\n");
+ pel_msk = 0x0f;
+
+ /* Set additional registers like in 8-bit mode */
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60);
+
+ /* disable enhanced mode */
+ svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
+ break;
+ case 4:
+ fb_dbg(info, "4 bit pseudocolor\n");
+ vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
+ svga_wattr(par->state.vgabase, 0x33, 0x01);
+ svga_wcrt_mask(par->state.vgabase, 0x05, 0x40, 0x60);
+ pel_msk = 0xf0;
+
+ /* Set additional registers like in 8-bit mode */
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0);
+
+ /* disable enhanced mode */
+ svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
+ break;
+ case 5:
fb_dbg(info, "8 bit pseudocolor\n");
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x05, 0x20, 0x60);
if (info->var.pixclock > 20000 ||
par->chip == CHIP_357_VIRGE_GX2 ||
par->chip == CHIP_359_VIRGE_GX2P ||
@@ -819,7 +866,7 @@ static int s3fb_set_par(struct fb_info *info)
multiplex = 1;
}
break;
- case 4:
+ case 6:
fb_dbg(info, "5/5/5 truecolor\n");
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
@@ -847,7 +894,7 @@ static int s3fb_set_par(struct fb_info *info)
hmul = 2;
}
break;
- case 5:
+ case 7:
fb_dbg(info, "5/6/5 truecolor\n");
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
@@ -875,12 +922,12 @@ static int s3fb_set_par(struct fb_info *info)
hmul = 2;
}
break;
- case 6:
+ case 8:
/* VIRGE VX case */
fb_dbg(info, "8/8/8 truecolor\n");
svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
break;
- case 7:
+ case 9:
fb_dbg(info, "8/8/8/8 truecolor\n");
svga_wcrt_mask(par->state.vgabase, 0x50, 0x30, 0x30);
svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
@@ -889,6 +936,7 @@ static int s3fb_set_par(struct fb_info *info)
fb_err(info, "unsupported mode - bug\n");
return -EINVAL;
}
+ vga_w(par->state.vgabase, VGA_PEL_MSK, pel_msk);
if (par->chip != CHIP_988_VIRGE_VX) {
svga_wseq_mask(par->state.vgabase, 0x15, multiplex ? 0x10 : 0x00, 0x10);
@@ -927,33 +975,26 @@ static int s3fb_set_par(struct fb_info *info)
static int s3fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *fb)
{
+ struct s3fb_info *par = fb->par;
+ int cols;
+
switch (fb->var.bits_per_pixel) {
case 0:
+ case 1:
+ case 2:
case 4:
- if (regno >= 16)
- return -EINVAL;
-
- if ((fb->var.bits_per_pixel == 4) &&
- (fb->var.nonstd == 0)) {
- outb(0xF0, VGA_PEL_MSK);
- outb(regno*16, VGA_PEL_IW);
- } else {
- outb(0x0F, VGA_PEL_MSK);
- outb(regno, VGA_PEL_IW);
- }
- outb(red >> 10, VGA_PEL_D);
- outb(green >> 10, VGA_PEL_D);
- outb(blue >> 10, VGA_PEL_D);
- break;
case 8:
- if (regno >= 256)
+ cols = 1 << (fb->var.bits_per_pixel ? fb->var.bits_per_pixel : 4);
+ if (regno >= cols)
return -EINVAL;
- outb(0xFF, VGA_PEL_MSK);
- outb(regno, VGA_PEL_IW);
- outb(red >> 10, VGA_PEL_D);
- outb(green >> 10, VGA_PEL_D);
- outb(blue >> 10, VGA_PEL_D);
+ if ((fb->var.bits_per_pixel == 4) && ((fb->var.nonstd & 1) == 0))
+ regno <<= 4;
+
+ vga_w(par->state.vgabase, VGA_PEL_IW, regno);
+ vga_w(par->state.vgabase, VGA_PEL_D, red >> 10);
+ vga_w(par->state.vgabase, VGA_PEL_D, green >> 10);
+ vga_w(par->state.vgabase, VGA_PEL_D, blue >> 10);
break;
case 16:
if (regno >= 16)
@@ -988,34 +1029,30 @@ static int s3fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static int s3fb_blank(int blank_mode, struct fb_info *info)
{
struct s3fb_info *par = info->par;
+ u8 data;
+
+ data = (blank_mode == FB_BLANK_UNBLANK) ? 0x00 : 0x20;
+ svga_wseq_mask(par->state.vgabase, 0x01, data, 0x20);
+ svga_wseq_mask(par->state.vgabase, 0x18, data, 0x20);
switch (blank_mode) {
- case FB_BLANK_UNBLANK:
- fb_dbg(info, "unblank\n");
- svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
- svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
- break;
- case FB_BLANK_NORMAL:
- fb_dbg(info, "blank\n");
- svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
- svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ default:
+ data = 0x00;
break;
case FB_BLANK_HSYNC_SUSPEND:
- fb_dbg(info, "hsync\n");
- svga_wcrt_mask(par->state.vgabase, 0x56, 0x02, 0x06);
- svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ data = 0x02;
break;
case FB_BLANK_VSYNC_SUSPEND:
- fb_dbg(info, "vsync\n");
- svga_wcrt_mask(par->state.vgabase, 0x56, 0x04, 0x06);
- svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ data = 0x04;
break;
case FB_BLANK_POWERDOWN:
- fb_dbg(info, "sync down\n");
- svga_wcrt_mask(par->state.vgabase, 0x56, 0x06, 0x06);
- svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ data = 0x06;
break;
}
+ svga_wcrt_mask(par->state.vgabase, 0x56, data, 0x06);
+
+ data = (blank_mode == FB_BLANK_POWERDOWN) ? 0x01 : 0x00;
+ svga_wseq_mask(par->state.vgabase, 0x14, data, 0x01);
return 0;
}
@@ -1045,6 +1082,33 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
+/* Get capabilities of accelerator based on the mode */
+
+static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
+ struct fb_var_screeninfo *var)
+{
+ int i;
+
+ if (var->bits_per_pixel == 0) {
+ /* can only support 256 8x16 bitmap */
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, caps->x);
+ bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, caps->y);
+ caps->len = 256;
+ } else {
+ if (var->bits_per_pixel == 4 && (var->nonstd & 1) != 0) {
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ for (i = 8; i <= FB_MAX_BLIT_WIDTH; i += 8)
+ set_bit(i - 1, caps->x);
+ } else {
+ bitmap_fill(caps->x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(caps->y, FB_MAX_BLIT_HEIGHT);
+ caps->len = ~(u32)0;
+ }
+}
+
/* ------------------------------------------------------------------------- */
/* Frame buffer operations */
@@ -1063,7 +1127,7 @@ static const struct fb_ops s3fb_ops = {
.fb_copyarea = cfb_copyarea,
.fb_imageblit = s3fb_imageblit,
__FB_DEFAULT_IOMEM_OPS_MMAP,
- .fb_get_caps = svga_get_caps,
+ .fb_get_caps = s3fb_get_caps,
};
/* ------------------------------------------------------------------------- */
@@ -1445,6 +1509,8 @@ static int __maybe_unused s3_pci_suspend(struct device *dev)
}
fb_set_suspend(info, 1);
+ svga_wseq_mask(par->state.vgabase, 0x18, 0x20, 0x20);
+ svga_wseq_mask(par->state.vgabase, 0x14, 0x01, 0x01);
mutex_unlock(&(par->open_lock));
console_unlock();
@@ -1471,6 +1537,9 @@ static int __maybe_unused s3_pci_resume(struct device *dev)
return 0;
}
+ vga_wseq(par->state.vgabase, 0x08, 0x06);
+ svga_wseq_mask(par->state.vgabase, 0x18, 0x00, 0x20);
+ svga_wseq_mask(par->state.vgabase, 0x14, 0x00, 0x01);
s3fb_set_par(info);
fb_set_suspend(info, 0);
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 1893815dc67f..6acf5a00c2ba 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -93,6 +93,7 @@ struct simplefb_par {
static void simplefb_clocks_destroy(struct simplefb_par *par);
static void simplefb_regulators_destroy(struct simplefb_par *par);
+static void simplefb_detach_genpds(void *res);
/*
* fb_ops.fb_destroy is called by the last put_fb_info() call at the end
@@ -105,6 +106,7 @@ static void simplefb_destroy(struct fb_info *info)
simplefb_regulators_destroy(info->par);
simplefb_clocks_destroy(info->par);
+ simplefb_detach_genpds(info->par);
if (info->screen_base)
iounmap(info->screen_base);
@@ -445,13 +447,14 @@ static void simplefb_detach_genpds(void *res)
if (!IS_ERR_OR_NULL(par->genpds[i]))
dev_pm_domain_detach(par->genpds[i], true);
}
+ par->num_genpds = 0;
}
static int simplefb_attach_genpds(struct simplefb_par *par,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- unsigned int i;
+ unsigned int i, num_genpds;
int err;
err = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -465,26 +468,35 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
return err;
}
- par->num_genpds = err;
+ num_genpds = err;
/*
* Single power-domain devices are handled by the driver core, so
* nothing to do here.
*/
- if (par->num_genpds <= 1)
+ if (num_genpds <= 1) {
+ par->num_genpds = num_genpds;
return 0;
+ }
- par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds),
+ par->genpds = devm_kcalloc(dev, num_genpds, sizeof(*par->genpds),
GFP_KERNEL);
if (!par->genpds)
return -ENOMEM;
- par->genpd_links = devm_kcalloc(dev, par->num_genpds,
+ par->genpd_links = devm_kcalloc(dev, num_genpds,
sizeof(*par->genpd_links),
GFP_KERNEL);
if (!par->genpd_links)
return -ENOMEM;
+ /*
+ * Set par->num_genpds only after genpds and genpd_links have been
+ * allocated, so that simplefb_detach_genpds() exits early when it
+ * runs before initialisation has completed.
+ */
+ par->num_genpds = num_genpds;
+
for (i = 0; i < par->num_genpds; i++) {
par->genpds[i] = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR(par->genpds[i])) {
@@ -506,9 +518,10 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
dev_warn(dev, "failed to link power-domain %u\n", i);
}
- return devm_add_action_or_reset(dev, simplefb_detach_genpds, par);
+ return 0;
}
#else
+static void simplefb_detach_genpds(void *res) { }
static int simplefb_attach_genpds(struct simplefb_par *par,
struct platform_device *pdev)
{
@@ -622,18 +635,20 @@ static int simplefb_probe(struct platform_device *pdev)
ret = devm_aperture_acquire_for_platform_device(pdev, par->base, par->size);
if (ret) {
dev_err(&pdev->dev, "Unable to acquire aperture: %d\n", ret);
- goto error_regulators;
+ goto error_genpds;
}
ret = register_framebuffer(info);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
- goto error_regulators;
+ goto error_genpds;
}
dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
return 0;
+error_genpds:
+ simplefb_detach_genpds(par);
error_regulators:
simplefb_regulators_destroy(par);
error_clocks:
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index c90f48ebb15e..d8f3bfb2dd6c 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -390,7 +390,7 @@ static int xenfb_probe(struct xenbus_device *dev,
info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- info->gfns = vmalloc(array_size(sizeof(unsigned long), info->nr_pages));
+ info->gfns = vmalloc_array(info->nr_pages, sizeof(unsigned long));
if (!info->gfns)
goto error_nomem;
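
vmalloc_array(n, size) is the dedicated helper for virtually contiguous array allocations; like the array_size() form it replaces, it guards the n * size multiplication against overflow, but states the intent directly. A minimal sketch of the pattern (the helper name is illustrative):

	#include <linux/vmalloc.h>

	static unsigned long *alloc_gfn_table(unsigned int nr_pages)
	{
		/* overflow-checked equivalent of vmalloc(nr_pages * sizeof(*gfns)) */
		unsigned long *gfns = vmalloc_array(nr_pages, sizeof(*gfns));

		return gfns;	/* NULL on allocation failure or overflow */
	}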