Diffstat (limited to 'drivers/gpu')
102 files changed, 701 insertions, 414 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2a0df4cabb99..6f5b4a0e0a34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1290,6 +1290,7 @@ struct amdgpu_device {  	bool                            debug_disable_gpu_ring_reset;  	bool                            debug_vm_userptr;  	bool                            debug_disable_ce_logs; +	bool                            debug_enable_ce_cs;  	/* Protection for the following isolation structure */  	struct mutex                    enforce_isolation_mutex; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 83020963dfde..a2ca9acf8c4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2329,10 +2329,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)  int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,  					  struct kfd_vm_fault_info *mem)  { -	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { +	if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {  		*mem = *adev->gmc.vm_fault_info; -		mb(); /* make sure read happened */ -		atomic_set(&adev->gmc.vm_fault_info_updated, 0); +		atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);  	}  	return 0;  } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c index ef996493115f..425a3e564360 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT  /*   * Copyright 2025 Advanced Micro Devices, Inc.   * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h index bcb97d245673..353421807387 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2025 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9cd7741d2254..2f6a96af7fb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -364,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,  	if (p->uf_bo && ring->funcs->no_user_fence)  		return -EINVAL; +	if (!p->adev->debug_enable_ce_cs && +	    chunk_ib->flags & AMDGPU_IB_FLAG_CE) { +		dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n"); +		return -EINVAL; +	} +  	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&  	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {  		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) @@ -702,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,  	 */  	const s64 us_upper_bound = 200000; -	if (!adev->mm_stats.log2_max_MBps) { +	if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {  		*max_bytes = 0;  		*max_vis_bytes = 0;  		return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7a899fb4de29..3d032c4e2dce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1882,6 +1882,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device  static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)  { +	/* Enabling ASPM causes randoms hangs on Tahiti and Oland on Zen4. +	 * It's unclear if this is a platform-specific or GPU-specific issue. +	 * Disable ASPM on SI for the time being. +	 */ +	if (adev->family == AMDGPU_FAMILY_SI) +		return true; +  #if IS_ENABLED(CONFIG_X86)  	struct cpuinfo_x86 *c = &cpu_data(0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 73401f0aeb34..dd7b2b796427 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1033,7 +1033,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,  	/* Until a uniform way is figured, get mask based on hwid */  	switch (hw_id) {  	case VCN_HWID: -		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; +		/* VCN vs UVD+VCE */ +		if (!amdgpu_ip_version(adev, VCE_HWIP, 0)) +			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;  		break;  	case DMU_HWID:  		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK) @@ -2565,7 +2567,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega10_reg_base_init(adev);  		adev->sdma.num_instances = 2; +		adev->sdma.sdma_mask = 3;  		adev->gmc.num_umc = 4; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); @@ -2592,7 +2596,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega10_reg_base_init(adev);  		adev->sdma.num_instances = 2; +		adev->sdma.sdma_mask = 3;  		adev->gmc.num_umc = 4; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); @@ -2619,8 +2625,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega10_reg_base_init(adev);  		adev->sdma.num_instances = 1; +		
adev->sdma.sdma_mask = 1;  		adev->vcn.num_vcn_inst = 1;  		adev->gmc.num_umc = 2; +		adev->gfx.xcc_mask = 1;  		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {  			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);  			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); @@ -2665,7 +2673,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega20_reg_base_init(adev);  		adev->sdma.num_instances = 2; +		adev->sdma.sdma_mask = 3;  		adev->gmc.num_umc = 8; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); @@ -2693,8 +2703,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		arct_reg_base_init(adev);  		adev->sdma.num_instances = 8; +		adev->sdma.sdma_mask = 0xff;  		adev->vcn.num_vcn_inst = 2;  		adev->gmc.num_umc = 8; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); @@ -2726,8 +2738,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		aldebaran_reg_base_init(adev);  		adev->sdma.num_instances = 5; +		adev->sdma.sdma_mask = 0x1f;  		adev->vcn.num_vcn_inst = 2;  		adev->gmc.num_umc = 4; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); @@ -2762,6 +2776,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		} else {  			cyan_skillfish_reg_base_init(adev);  			adev->sdma.num_instances = 2; +			adev->sdma.sdma_mask = 3; +			adev->gfx.xcc_mask = 1;  			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);  			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);  			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index bff25ef3e2d0..61268aa82df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -144,7 +144,8 @@ enum AMDGPU_DEBUG_MASK {  	AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),  	AMDGPU_DEBUG_SMU_POOL = BIT(7),  	AMDGPU_DEBUG_VM_USERPTR = BIT(8), -	AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9) +	AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9), +	AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10)  };  unsigned int amdgpu_vram_limit = UINT_MAX; @@ -2289,6 +2290,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)  		pr_info("debug: disable kernel logs of correctable errors\n");  		adev->debug_disable_ce_logs = true;  	} + +	if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) { +		pr_info("debug: allowing command submission to CE engine\n"); +		adev->debug_enable_ce_cs = true; +	}  }  static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index fd8cca241da6..18a7829122d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -758,11 +758,42 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)   * @fence: fence of the ring to signal   *   */ -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence) +void 
amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)  { -	dma_fence_set_error(&fence->base, -ETIME); -	amdgpu_fence_write(fence->ring, fence->seq); -	amdgpu_fence_process(fence->ring); +	struct dma_fence *unprocessed; +	struct dma_fence __rcu **ptr; +	struct amdgpu_fence *fence; +	struct amdgpu_ring *ring = af->ring; +	unsigned long flags; +	u32 seq, last_seq; + +	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; +	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; + +	/* mark all fences from the guilty context with an error */ +	spin_lock_irqsave(&ring->fence_drv.lock, flags); +	do { +		last_seq++; +		last_seq &= ring->fence_drv.num_fences_mask; + +		ptr = &ring->fence_drv.fences[last_seq]; +		rcu_read_lock(); +		unprocessed = rcu_dereference(*ptr); + +		if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) { +			fence = container_of(unprocessed, struct amdgpu_fence, base); + +			if (fence == af) +				dma_fence_set_error(&fence->base, -ETIME); +			else if (fence->context == af->context) +				dma_fence_set_error(&fence->base, -ECANCELED); +		} +		rcu_read_unlock(); +	} while (last_seq != seq); +	spin_unlock_irqrestore(&ring->fence_drv.lock, flags); +	/* signal the guilty fence */ +	amdgpu_fence_write(ring, af->seq); +	amdgpu_fence_process(ring);  }  void amdgpu_fence_save_wptr(struct dma_fence *fence) @@ -790,14 +821,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,  	struct dma_fence *unprocessed;  	struct dma_fence __rcu **ptr;  	struct amdgpu_fence *fence; -	u64 wptr, i, seqno; +	u64 wptr; +	u32 seq, last_seq; -	seqno = amdgpu_fence_read(ring); +	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; +	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;  	wptr = ring->fence_drv.signalled_wptr;  	ring->ring_backup_entries_to_copy = 0; -	for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { -		ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; +	do { +		last_seq++; +		last_seq &= ring->fence_drv.num_fences_mask; + +		ptr = &ring->fence_drv.fences[last_seq];  		rcu_read_lock();  		unprocessed = rcu_dereference(*ptr); @@ -813,7 +849,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,  			wptr = fence->wptr;  		}  		rcu_read_unlock(); -	} +	} while (last_seq != seq);  }  /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6b7d66b6d4cc..63ee6ba6a931 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -371,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)  	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {  		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {  			ring = &adev->jpeg.inst[i].ring_dec[j]; -			if (val & (BIT_ULL(1) << ((i * adev->jpeg.num_jpeg_rings) + j))) +			if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j)))  				ring->sched.ready = true;  			else  				ring->sched.ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a9327472c651..b3e6b3fcdf2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -758,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)  		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);  		return copy_to_user(out, &ui64, min(size, 8u)) ? 
-EFAULT : 0;  	case AMDGPU_INFO_VRAM_USAGE: -		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); +		ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? +			ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;  		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;  	case AMDGPU_INFO_VIS_VRAM_USAGE:  		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); @@ -804,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)  		mem.vram.usable_heap_size = adev->gmc.real_vram_size -  			atomic64_read(&adev->vram_pin_size) -  			AMDGPU_VM_RESERVED_VRAM; -		mem.vram.heap_usage = -			ttm_resource_manager_usage(vram_man); +		mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? +				ttm_resource_manager_usage(vram_man) : 0;  		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;  		mem.cpu_accessible_vram.total_heap_size = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 5bf9be073cdd..4883adcfbb4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -409,7 +409,7 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,  		return -EINVAL;  	/* Clear the doorbell array before detection */ -	memset(adev->mes.hung_queue_db_array_cpu_addr, 0, +	memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,  		adev->mes.hung_queue_db_array_size * sizeof(u32));  	input.queue_type = queue_type;  	input.detect_only = detect_only; @@ -420,12 +420,17 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,  		dev_err(adev->dev, "failed to detect and reset\n");  	} else {  		*hung_db_num = 0; -		for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) { +		for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {  			if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {  				hung_db_array[i] = db_array[i];  				*hung_db_num += 1;  			}  		} + +		/* +		 * TODO: return HQD info for MES scheduled user compute queue reset cases +		 * stored in hung_db_array hqd info offset to full array size +		 */  	}  	return r; @@ -686,14 +691,11 @@ out:  bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)  {  	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; -	bool is_supported = false; - -	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && -	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && -	    mes_rev >= 0x63) -		is_supported = true; -	return is_supported; +	return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && +		 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && +		 mes_rev >= 0x63) || +		amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));  }  /* Fix me -- node_id is used to identify the correct MES instances in the future */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 6b506fc72f58..97c137c90f97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -149,6 +149,7 @@ struct amdgpu_mes {  	void                *resource_1_addr[AMDGPU_MAX_MES_PIPES];  	int				hung_queue_db_array_size; +	int				hung_queue_hqd_info_offset;  	struct amdgpu_bo		*hung_queue_db_array_gpu_obj;  	uint64_t			hung_queue_db_array_gpu_addr;  	void				*hung_queue_db_array_cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 
index 8f6ce948c684..5ec5c3ff22bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -811,7 +811,7 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,  	if (r)  		return r; -	/* signal the fence of the bad job */ +	/* signal the guilty fence and set an error on all fences from the context */  	if (guilty_fence)  		amdgpu_fence_driver_guilty_force_completion(guilty_fence);  	/* Re-emit the non-guilty commands */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index b6b649179776..4b46e3c26ff3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -155,7 +155,7 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops;  void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);  void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);  void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence); +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);  void amdgpu_fence_save_wptr(struct dma_fence *fence);  int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3328ab63376b..f96beb96c75c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -598,8 +598,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)  	vf2pf_info->driver_cert = 0;  	vf2pf_info->os_info.all = 0; -	vf2pf_info->fb_usage = -		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20; +	vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? 
+		 ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;  	vf2pf_info->fb_vis_usage =  		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;  	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 474bfe36c0c2..aa78c2ee9e21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)  	return 0;  } +static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev) +{ +	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { +	case IP_VERSION(6, 1, 1): +		return adev->pm.fw_version < 0x0a640500; +	default: +		return false; +	} +} + +static int vpe_get_dpm_level(struct amdgpu_device *adev) +{ +	struct amdgpu_vpe *vpe = &adev->vpe; + +	if (!adev->pm.dpm_enabled) +		return 0; + +	return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); +} +  static void vpe_idle_work_handler(struct work_struct *work)  {  	struct amdgpu_device *adev = @@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)  	unsigned int fences = 0;  	fences += amdgpu_fence_count_emitted(&adev->vpe.ring); +	if (fences) +		goto reschedule; -	if (fences == 0) -		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); -	else -		schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); +	if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0) +		goto reschedule; + +	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); +	return; + +reschedule: +	schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);  }  static int vpe_common_init(struct amdgpu_vpe *vpe) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a5adb2ed9b3c..9d934c07fa6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,  	    !adev->gmc.vram_vendor)  		return 0; +	if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) +		return 0; +  	return attr->mode;  } diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c index 96616a865aac..ed1e25661706 100644 --- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT  /*   * Copyright 2018 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 66c47c466532..d61eb9f187c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5862,8 +5862,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,  	unsigned vmid = AMDGPU_JOB_GET_VMID(job);  	u32 header, control = 0; -	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); -  	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);  	control |= ib->length_dw | (vmid << 24); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 710ec9c34e43..93fde0f9af87 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -4419,8 +4419,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,  	unsigned vmid = AMDGPU_JOB_GET_VMID(job);  	u32 header, control = 0; -	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); -  	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);  	control |= ib->length_dw | (vmid << 24); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 93d7ccb7d013..0e5e54d0a9a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1068,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)  					GFP_KERNEL);  	if (!adev->gmc.vm_fault_info)  		return -ENOMEM; -	atomic_set(&adev->gmc.vm_fault_info_updated, 0); +	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);  	return 0;  } @@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,  	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,  			     VMID);  	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) -		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) { +		&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {  		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;  		u32 protections = REG_GET_FIELD(status,  					VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,  		info->prot_read = protections & 0x8 ? true : false;  		info->prot_write = protections & 0x10 ? true : false;  		info->prot_exec = protections & 0x20 ? true : false; -		mb(); -		atomic_set(&adev->gmc.vm_fault_info_updated, 1); +		atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);  	}  	return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c5e2a2c41e06..e1509480dfc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1183,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)  					GFP_KERNEL);  	if (!adev->gmc.vm_fault_info)  		return -ENOMEM; -	atomic_set(&adev->gmc.vm_fault_info_updated, 0); +	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);  	return 0;  } @@ -1478,7 +1478,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,  	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,  			     VMID);  	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) -		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) { +		&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {  		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;  		u32 protections = REG_GET_FIELD(status,  					VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1494,8 +1494,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,  		info->prot_read = protections & 0x8 ? 
true : false;  		info->prot_write = protections & 0x10 ? true : false;  		info->prot_exec = protections & 0x20 ? true : false; -		mb(); -		atomic_set(&adev->gmc.vm_fault_info_updated, 1); +		atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);  	}  	return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 2db9b2c63693..1cd9eaeef38f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -208,10 +208,10 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,  	struct amdgpu_userq_mgr *uqm, *tmp;  	unsigned int hung_db_num = 0;  	int queue_id, r, i; -	u32 db_array[4]; +	u32 db_array[8]; -	if (db_array_size > 4) { -		dev_err(adev->dev, "DB array size (%d vs 4) too small\n", +	if (db_array_size > 8) { +		dev_err(adev->dev, "DB array size (%d vs 8) too small\n",  			db_array_size);  		return -EINVAL;  	} diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index e82188431f79..da575bb1377f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -66,7 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);  #define GFX_MES_DRAM_SIZE	0x80000  #define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE) -#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */ +#define MES11_HUNG_HQD_INFO_OFFSET	4  static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)  { @@ -1720,8 +1721,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)  	struct amdgpu_device *adev = ip_block->adev;  	int pipe, r; -	adev->mes.hung_queue_db_array_size = -		MES11_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET; +  	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {  		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)  			continue; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index aff06f06aeee..7f3512d9de07 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -47,7 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);  #define MES_EOP_SIZE   2048 -#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset [4:7] hqd info */ +#define MES12_HUNG_HQD_INFO_OFFSET	4  static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)  { @@ -228,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,  			pipe, x_pkt->header.opcode);  	r = amdgpu_fence_wait_polling(ring, seq, timeout); -	if (r < 1 || !*status_ptr) { + +	/* +	 * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success). +	 * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information. 
+	 */ +	if (r < 1 || !(lower_32_bits(*status_ptr))) {  		if (misc_op_str)  			dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n", @@ -1899,8 +1905,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)  	struct amdgpu_device *adev = ip_block->adev;  	int pipe, r; -	adev->mes.hung_queue_db_array_size = -		MES12_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET; +  	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {  		r = amdgpu_mes_init_microcode(adev, pipe);  		if (r) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6c5c7c1bf5ed..6e7bc983fc0b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,  	pr_debug_ratelimited("Evicting process pid %d queues\n",  			    pdd->process->lead_thread->pid); +	if (dqm->dev->kfd->shared_resources.enable_mes) { +		pdd->last_evict_timestamp = get_jiffies_64(); +		retval = suspend_all_queues_mes(dqm); +		if (retval) { +			dev_err(dev, "Suspending all queues failed"); +			goto out; +		} +	} +  	/* Mark all queues as evicted. Deactivate all active queues on  	 * the qpd.  	 */ @@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,  		decrement_queue_count(dqm, qpd, q);  		if (dqm->dev->kfd->shared_resources.enable_mes) { -			int err; - -			err = remove_queue_mes(dqm, q, qpd); -			if (err) { +			retval = remove_queue_mes(dqm, q, qpd); +			if (retval) {  				dev_err(dev, "Failed to evict queue %d\n",  					q->properties.queue_id); -				retval = err; +				goto out;  			}  		}  	} -	pdd->last_evict_timestamp = get_jiffies_64(); -	if (!dqm->dev->kfd->shared_resources.enable_mes) + +	if (!dqm->dev->kfd->shared_resources.enable_mes) { +		pdd->last_evict_timestamp = get_jiffies_64();  		retval = execute_queues_cpsch(dqm,  					      qpd->is_debug ?  					      KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :  					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,  					      USE_DEFAULT_GRACE_PERIOD); +	} else { +		retval = resume_all_queues_mes(dqm); +		if (retval) +			dev_err(dev, "Resuming all queues failed"); +	}  out:  	dqm_unlock(dqm); @@ -3098,61 +3111,17 @@ out:  	return ret;  } -static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm, -				   struct qcm_process_device *qpd) -{ -	struct device *dev = dqm->dev->adev->dev; -	int ret = 0; - -	/* Check if process is already evicted */ -	dqm_lock(dqm); -	if (qpd->evicted) { -		/* Increment the evicted count to make sure the -		 * process stays evicted before its terminated. 
-		 */ -		qpd->evicted++; -		dqm_unlock(dqm); -		goto out; -	} -	dqm_unlock(dqm); - -	ret = suspend_all_queues_mes(dqm); -	if (ret) { -		dev_err(dev, "Suspending all queues failed"); -		goto out; -	} - -	ret = dqm->ops.evict_process_queues(dqm, qpd); -	if (ret) { -		dev_err(dev, "Evicting process queues failed"); -		goto out; -	} - -	ret = resume_all_queues_mes(dqm); -	if (ret) -		dev_err(dev, "Resuming all queues failed"); - -out: -	return ret; -} -  int kfd_evict_process_device(struct kfd_process_device *pdd)  {  	struct device_queue_manager *dqm;  	struct kfd_process *p; -	int ret = 0;  	p = pdd->process;  	dqm = pdd->dev->dqm;  	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); -	if (dqm->dev->kfd->shared_resources.enable_mes) -		ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd); -	else -		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); - -	return ret; +	return dqm->ops.evict_process_queues(dqm, &pdd->qpd);  }  int reserve_debug_trap_vmid(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0d03e324d5b9..bfa3199591b6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,  	struct dc_stream_state *stream,  	struct dc_crtc_timing_adjust *adjust)  { -	struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); +	struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);  	if (!offload_work) {  		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");  		return;  	} -	struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL); +	struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);  	if (!adjust_copy) {  		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");  		kfree(offload_work); @@ -2085,8 +2085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	dc_hardware_init(adev->dm.dc); -	adev->dm.restore_backlight = true; -  	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);  	if (!adev->dm.hpd_rx_offload_wq) {  		drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); @@ -3442,7 +3440,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)  		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);  		dc_resume(dm->dc); -		adev->dm.restore_backlight = true;  		amdgpu_dm_irq_resume_early(adev); @@ -9969,6 +9966,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,  	bool mode_set_reset_required = false;  	u32 i;  	struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; +	bool set_backlight_level = false;  	/* Disable writeback */  	for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -10088,6 +10086,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,  			acrtc->hw_mode = new_crtc_state->mode;  			crtc->hwmode = new_crtc_state->mode;  			mode_set_reset_required = true; +			set_backlight_level = true;  		} else if (modereset_required(new_crtc_state)) {  			drm_dbg_atomic(dev,  				       "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -10144,16 +10143,13 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,  	 * to fix a flicker issue.  	 * It will cause the dm->actual_brightness is not the current panel brightness  	 * level. 
(the dm->brightness is the correct panel level) -	 * So we set the backlight level with dm->brightness value after initial -	 * set mode. Use restore_backlight flag to avoid setting backlight level -	 * for every subsequent mode set. +	 * So we set the backlight level with dm->brightness value after set mode  	 */ -	if (dm->restore_backlight) { +	if (set_backlight_level) {  		for (i = 0; i < dm->num_of_edps; i++) {  			if (dm->backlight_dev[i])  				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);  		} -		dm->restore_backlight = false;  	}  } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 009f206226f0..db75e991ac7b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -631,13 +631,6 @@ struct amdgpu_display_manager {  	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];  	/** -	 * @restore_backlight: -	 * -	 * Flag to indicate whether to restore backlight after modeset. -	 */ -	bool restore_backlight; - -	/**  	 * @aux_hpd_discon_quirk:  	 *  	 * quirk for hpd discon while aux is on-going. diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 1ec9d03ad747..38f9ea313dcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)  	struct vblank_control_work *vblank_work =  		container_of(work, struct vblank_control_work, work);  	struct amdgpu_display_manager *dm = vblank_work->dm; +	struct amdgpu_device *adev = drm_to_adev(dm->ddev); +	int r;  	mutex_lock(&dm->dc_lock); @@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)  	if (dm->active_vblank_irq_count == 0) {  		dc_post_update_surfaces_to_stream(dm->dc); + +		r = amdgpu_dpm_pause_power_profile(adev, true); +		if (r) +			dev_warn(adev->dev, "failed to set default power profile mode\n"); +  		dc_allow_idle_optimizations(dm->dc, true); + +		r = amdgpu_dpm_pause_power_profile(adev, false); +		if (r) +			dev_warn(adev->dev, "failed to restore the power profile mode\n");  	}  	mutex_unlock(&dm->dc_lock); @@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)  	int irq_type;  	int rc = 0; -	if (acrtc->otg_inst == -1) -		goto skip; +	if (enable && !acrtc->base.enabled) { +		drm_dbg_vbl(crtc->dev, +				"Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n", +				acrtc->crtc_id, acrtc->base.enabled); +		return -EINVAL; +	}  	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); @@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)  			return rc;  	}  #endif -skip: +  	if (amdgpu_in_reset(adev))  		return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index fe100e4c9801..cc21337a182f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct  		edid_caps->panel_patch.remove_sink_ext_caps = true;  		break;  	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): +	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):  		drm_dbg_driver(dev, "Disabling VSC on 
monitor with panel id %X\n", panel_id);  		edid_caps->panel_patch.disable_colorimetry = true;  		break; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 09be2a90cc79..4f569cd8a5d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(  			dpp_base->ctx->dc->optimized_required = true;  			dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;  		} -	} else { -		REG_SET(CM_MEM_PWR_CTRL, 0, -				BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 7c276c319086..ce3d0b45fb4c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc)  		 */  		struct dc_link *link = dc->links[i]; +		if (link->ep_type != DISPLAY_ENDPOINT_PHY) +			continue; +  		link->link_enc->funcs->hw_init(link->link_enc);  		/* Check for enabled DIG to identify enabled display */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 41c76ba9ba56..62a39204fe0b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -44,7 +44,13 @@   */  #define MAX_PIPES 6  #define MAX_PHANTOM_PIPES (MAX_PIPES / 2) -#define MAX_LINKS (MAX_PIPES * 2 +2) + +#define MAX_DPIA 6 +#define MAX_CONNECTOR 6 +#define MAX_VIRTUAL_LINKS 4 + +#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS) +  #define MAX_DIG_LINK_ENCODERS 7  #define MAX_DWB_PIPES	1  #define MAX_HPO_DP2_ENCODERS	4 diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 9e33bf937a69..2676ae9f6fe8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,  	struct audio_output audio_output[MAX_PIPES];  	struct dc_stream_state *streams_on_link[MAX_PIPES];  	int num_streams_on_link = 0; +	struct dc *dc = (struct dc *)link->dc;  	needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=  	link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings)); @@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,  		if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {  			stream_update.stream = streams_on_link[i];  			stream_update.dpms_off = &dpms_off; -			dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update); +			dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);  		}  	}  } diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h index 086869264425..a252ee4c7874 100644 --- a/drivers/gpu/drm/amd/include/amd_cper.h +++ b/drivers/gpu/drm/amd/include/amd_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2025 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h index 64b553e7de1a..e7fdcee22a71 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index cf9932e68055..3a9522c17fee 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3500,6 +3500,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,  	 * for these GPUs to calculate bandwidth requirements.  	 */  	if (high_pixelclock_count) { +		/* Work around flickering lines at the bottom edge +		 * of the screen when using a single 4K 60Hz monitor. +		 */ +		disable_mclk_switching = true; +  		/* On Oland, we observe some flickering when two 4K 60Hz  		 * displays are connected, possibly because voltage is too low.  		 * Raise the voltage by requiring a higher SCLK. diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 8da882c51856..9b28c0728269 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,  		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *  			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;  	else if (hwmgr->pp_table_version == PP_TABLE_V0) -		thermal_data->max = data->thermal_temp_setting.temperature_shutdown * -			PP_TEMPERATURE_UNITS_PER_CENTIGRADES; +		thermal_data->max = data->thermal_temp_setting.temperature_shutdown;  	thermal_data->sw_ctf_threshold = thermal_data->max; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c index d2dbd90bb427..0a876c840c79 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c @@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)  	table->VoltageResponseTime = 0;  	table->PhaseResponseTime = 0;  	table->MemoryThermThrottleEnable = 1; -	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/ +	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);  	table->PCIeGenInterval = 1;  	table->VRConfig = 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 1f50f1e74c48..aa3ae9b115c4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)  	table->VoltageResponseTime  = 0;  	table->PhaseResponseTime  = 0;  	table->MemoryThermThrottleEnable  = 1; -	table->PCIeBootLinkLevel = 0; +	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);  	table->PCIeGenInterval = 1;  	result = iceland_populate_smc_svi2_config(hwmgr, table); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index f532f7c69259..a8961a8f5c42 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,  						      table_index);  	uint32_t table_size;  	int ret = 0; -	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) +	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)  		return -EINVAL;  	table_size = smu_table->tables[table_index].size; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index c15aef014f69..d41bd876167c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)  	__ast_write8(addr, reg + 1, val);  } -static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, +static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,  					 u8 val)  { -	u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); +	u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask); -	tmp |= val; -	__ast_write8_i(addr, reg, index, tmp); +	val &= ~preserve_mask; +	__ast_write8_i(addr, reg, index, tmp | val);  }  static inline u32 ast_read32(struct ast_device *ast, u32 reg) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index b4e8edc7c767..30b011ed0a05 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -836,22 +836,24 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,  static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)  {  	struct ast_device *ast = to_ast_device(crtc->dev); +	u8 vgacr17 = 0x00; +	u8 vgacrb6 = 0xff; -	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, 0x00); -	ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, 0x00); +	vgacr17 |= AST_IO_VGACR17_SYNC_ENABLE; +	vgacrb6 &= ~(AST_IO_VGACRB6_VSYNC_OFF | AST_IO_VGACRB6_HSYNC_OFF); + +	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17); +	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6);  }  static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)  {  	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);  	struct ast_device *ast = to_ast_device(crtc->dev); -	u8 vgacrb6; +	u8 vgacr17 = 0xff; -	ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, AST_IO_VGASR1_SD); - -	vgacrb6 = AST_IO_VGACRB6_VSYNC_OFF | -		  AST_IO_VGACRB6_HSYNC_OFF; -	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6); +	vgacr17 &= ~AST_IO_VGACR17_SYNC_ENABLE; +	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17);  	/*  	 * HW cursors require the underlying primary plane and CRTC to diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index e15adaf3a80e..30578e3b07e4 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -29,6 +29,7 @@  #define AST_IO_VGAGRI			(0x4E)  #define AST_IO_VGACRI			(0x54) +#define AST_IO_VGACR17_SYNC_ENABLE	BIT(7) /* called "Hardware reset" in docs */  #define AST_IO_VGACR80_PASSWORD		(0xa8)  #define AST_IO_VGACR99_VGAMEM_RSRV_MASK	GENMASK(1, 0)  #define AST_IO_VGACRA1_VGAIO_DISABLED	BIT(1) diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index 399fa7eebd49..03fc8fd10f20 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -121,8 +121,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx)  	}  	/* Test 
for known Chip ID. */ -	if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE || -	    chipid[2] != REG_CHIPID2_VALUE) { +	if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) {  		dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",  			chipid[0], chipid[1], chipid[2]);  		return -EINVAL; diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index d502d146b177..56638814bb28 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -280,7 +280,7 @@ sanity:      GIT_STRATEGY: none    script:      # ci-fairy check-commits --junit-xml=check-commits.xml -    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml +    # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml      - |        set -eu        image_tags=( diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c index 9dc0408fbbea..5b956229c82f 100644 --- a/drivers/gpu/drm/drm_draw.c +++ b/drivers/gpu/drm/drm_draw.c @@ -127,7 +127,7 @@ EXPORT_SYMBOL(drm_draw_fill16);  void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,  		     unsigned int height, unsigned int width, -		     u16 color) +		     u32 color)  {  	unsigned int y, x; diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h index f121ee7339dc..20cb404e23ea 100644 --- a/drivers/gpu/drm/drm_draw_internal.h +++ b/drivers/gpu/drm/drm_draw_internal.h @@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,  void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,  		     unsigned int height, unsigned int width, -		     u16 color); +		     u32 color);  void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,  		     unsigned int height, unsigned int width, diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index ebf305fb24f0..6fb55601252f 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);  void __drm_gem_reset_shadow_plane(struct drm_plane *plane,  				  struct drm_shadow_plane_state *shadow_plane_state)  { -	__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); -	drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); +	if (shadow_plane_state) { +		__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); +		drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); +	} else { +		__drm_atomic_helper_plane_reset(plane, NULL); +	}  }  EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index 1d6312fa1429..d4b6ea42db0f 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)  	*p = color & 0xff;  } +/* + * Special case if the pixel crosses page boundaries + */ +static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page, +					  unsigned int offset, u32 color) +{ +	u8 *vaddr2; +	u8 *p = vaddr + offset; + +	vaddr2 = kmap_local_page_try_from_panic(next_page); + +	*p++ = color & 0xff; +	color >>= 8; + +	if (offset == PAGE_SIZE - 1) +		p = vaddr2; + +	*p++ = color & 0xff; +	color >>= 8; + +	if (offset == PAGE_SIZE - 2) +		p = vaddr2; + +	*p = color & 0xff; +	kunmap_local(vaddr2); +} +  static void 
drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)  {  	u32 *p = vaddr + offset; @@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,  					page = new_page;  					vaddr = kmap_local_page_try_from_panic(pages[page]);  				} -				if (vaddr) +				if (!vaddr) +					continue; + +				// Special case for 24bit, as a pixel might cross page boundaries +				if (cpp == 3 && offset + 3 > PAGE_SIZE) +					drm_panic_write_pixel24_xpage(vaddr, pages[page + 1], +								      offset, fg32); +				else  					drm_panic_write_pixel(vaddr, offset, fg32, cpp);  			}  		} @@ -321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,  				page = new_page;  				vaddr = kmap_local_page_try_from_panic(pages[page]);  			} -			drm_panic_write_pixel(vaddr, offset, color, cpp); +			if (!vaddr) +				continue; + +			// Special case for 24bit, as a pixel might cross page boundaries +			if (cpp == 3 && offset + 3 > PAGE_SIZE) +				drm_panic_write_pixel24_xpage(vaddr, pages[page + 1], +							      offset, color); +			else +				drm_panic_write_pixel(vaddr, offset, color, cpp);  		}  	}  	if (vaddr) @@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f  static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,  				const struct font_desc *font, u32 fg_color)  { +	if (rect->x2 > sb->width || rect->y2 > sb->height) +		return; +  	if (logo_mono)  		drm_panic_blit(sb, rect, logo_mono->data,  			       DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color); @@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_  			       struct drm_panic_line *line, int yoffset, u32 fg_color)  {  	int chars_per_row = sb->width / font->width; -	struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height); +	struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);  	struct drm_panic_line line_wrap;  	if (line->len > chars_per_row) { @@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)  	struct drm_panic_line line;  	int yoffset; -	if (!font) +	if (!font || font->width > sb->width)  		return;  	yoffset = sb->height - font->height - (sb->height % font->height) / 2; @@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)  	pr_debug("QR width %d and scale %d\n", qr_width, scale);  	r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale); -	v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5; +	v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg); +	if (v_margin < 0) +		return -ENOSPC; +	v_margin /= 5;  	drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);  	r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale, @@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)  	/* Fill with the background color, and draw text on top */  	drm_panic_fill(sb, &r_screen, bg_color); -	if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr)) +	if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))  		drm_panic_logo_draw(sb, &r_logo, font, fg_color);  	draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c 
index b13a17276d07..88385dc3b30d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,  	u32 link_target, link_dwords;  	bool switch_context = gpu->exec_state != exec_state;  	bool switch_mmu_context = gpu->mmu_context != mmu_context; -	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); +	unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);  	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;  	bool has_blt = !!(gpu->identity.minor_features5 &  			  chipMinorFeatures5_BLT_ENGINE); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 77a0199f9ea5..4a4cace1f879 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,  		REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;  } +static bool fixup_dmc_evt(struct intel_display *display, +			  enum intel_dmc_id dmc_id, +			  i915_reg_t reg_ctl, u32 *data_ctl, +			  i915_reg_t reg_htp, u32 *data_htp) +{ +	if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl)) +		return false; + +	if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp)) +		return false; + +	/* make sure reg_ctl and reg_htp are for the same event */ +	if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) != +	    i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0))) +		return false; + +	/* +	 * On ADL-S the HRR event handler is not restored after DC6. +	 * Clear it to zero from the beginning to avoid mismatches later. +	 */ +	if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN && +	    is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) { +		*data_ctl = 0; +		*data_htp = 0; +		return true; +	} + +	return false; +} +  static bool disable_dmc_evt(struct intel_display *display,  			    enum intel_dmc_id dmc_id,  			    i915_reg_t reg, u32 data) @@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,  	for (i = 0; i < mmio_count; i++) {  		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);  		dmc_info->mmiodata[i] = mmiodata[i]; +	} + +	for (i = 0; i < mmio_count - 1; i++) { +		u32 orig_mmiodata[2] = { +			dmc_info->mmiodata[i], +			dmc_info->mmiodata[i+1], +		}; + +		if (!fixup_dmc_evt(display, dmc_id, +				   dmc_info->mmioaddr[i], &dmc_info->mmiodata[i], +				   dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1])) +			continue; + +		drm_dbg_kms(display->drm, +			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n", +			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), +			    orig_mmiodata[0], dmc_info->mmiodata[i]); +		drm_dbg_kms(display->drm, +			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n", +			    i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]), +			    orig_mmiodata[1], dmc_info->mmiodata[i+1]); +	} +	for (i = 0; i < mmio_count; i++) {  		drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", -			    i, mmioaddr[i], mmiodata[i], +			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],  			    is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :  			    is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? 
" (EVT_HTP)" : "",  			    disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i], diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 22a4a1575d22..c48384e58ea1 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -2113,10 +2113,11 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)  	if (intel_fb_uses_dpt(fb))  		intel_dpt_destroy(intel_fb->dpt_vm); -	intel_frontbuffer_put(intel_fb->frontbuffer); -  	intel_fb_bo_framebuffer_fini(intel_fb_bo(fb)); +	intel_frontbuffer_put(intel_fb->frontbuffer); + +	kfree(intel_fb->panic);  	kfree(intel_fb);  } @@ -2215,19 +2216,27 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  	struct intel_display *display = to_intel_display(obj->dev);  	struct drm_framebuffer *fb = &intel_fb->base;  	u32 max_stride; -	int ret = -EINVAL; +	int ret;  	int i; -	ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd); -	if (ret) -		return ret; +	intel_fb->panic = intel_panic_alloc(); +	if (!intel_fb->panic) +		return -ENOMEM; +	/* +	 * intel_frontbuffer_get() must be done before +	 * intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race. +	 */  	intel_fb->frontbuffer = intel_frontbuffer_get(obj);  	if (!intel_fb->frontbuffer) {  		ret = -ENOMEM; -		goto err; +		goto err_free_panic;  	} +	ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd); +	if (ret) +		goto err_frontbuffer_put; +  	ret = -EINVAL;  	if (!drm_any_plane_has_format(display->drm,  				      mode_cmd->pixel_format, @@ -2235,7 +2244,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  		drm_dbg_kms(display->drm,  			    "unsupported pixel format %p4cc / modifier 0x%llx\n",  			    &mode_cmd->pixel_format, mode_cmd->modifier[0]); -		goto err_frontbuffer_put; +		goto err_bo_framebuffer_fini;  	}  	max_stride = intel_fb_max_stride(display, mode_cmd->pixel_format, @@ -2246,7 +2255,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?  			    "tiled" : "linear",  			    mode_cmd->pitches[0], max_stride); -		goto err_frontbuffer_put; +		goto err_bo_framebuffer_fini;  	}  	/* FIXME need to adjust LINOFF/TILEOFF accordingly. 
*/ @@ -2254,7 +2263,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  		drm_dbg_kms(display->drm,  			    "plane 0 offset (0x%08x) must be 0\n",  			    mode_cmd->offsets[0]); -		goto err_frontbuffer_put; +		goto err_bo_framebuffer_fini;  	}  	drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd); @@ -2264,7 +2273,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {  			drm_dbg_kms(display->drm, "bad plane %d handle\n", i); -			goto err_frontbuffer_put; +			goto err_bo_framebuffer_fini;  		}  		stride_alignment = intel_fb_stride_alignment(fb, i); @@ -2272,7 +2281,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  			drm_dbg_kms(display->drm,  				    "plane %d pitch (%d) must be at least %u byte aligned\n",  				    i, fb->pitches[i], stride_alignment); -			goto err_frontbuffer_put; +			goto err_bo_framebuffer_fini;  		}  		if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) { @@ -2282,7 +2291,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  				drm_dbg_kms(display->drm,  					    "ccs aux plane %d pitch (%d) must be %d\n",  					    i, fb->pitches[i], ccs_aux_stride); -				goto err_frontbuffer_put; +				goto err_bo_framebuffer_fini;  			}  		} @@ -2291,7 +2300,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  	ret = intel_fill_fb_info(display, intel_fb);  	if (ret) -		goto err_frontbuffer_put; +		goto err_bo_framebuffer_fini;  	if (intel_fb_uses_dpt(fb)) {  		struct i915_address_space *vm; @@ -2317,10 +2326,13 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,  err_free_dpt:  	if (intel_fb_uses_dpt(fb))  		intel_dpt_destroy(intel_fb->dpt_vm); +err_bo_framebuffer_fini: +	intel_fb_bo_framebuffer_fini(obj);  err_frontbuffer_put:  	intel_frontbuffer_put(intel_fb->frontbuffer); -err: -	intel_fb_bo_framebuffer_fini(obj); +err_free_panic: +	kfree(intel_fb->panic); +  	return ret;  } @@ -2347,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev,  struct intel_framebuffer *intel_framebuffer_alloc(void)  {  	struct intel_framebuffer *intel_fb; -	struct intel_panic *panic;  	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);  	if (!intel_fb)  		return NULL; -	panic = intel_panic_alloc(); -	if (!panic) { -		kfree(intel_fb); -		return NULL; -	} - -	intel_fb->panic = panic; -  	return intel_fb;  } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 43be5377ddc1..73ed28ac9573 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -270,6 +270,8 @@ static void frontbuffer_release(struct kref *ref)  	spin_unlock(&display->fb_tracking.lock);  	i915_active_fini(&front->write); + +	drm_gem_object_put(obj);  	kfree_rcu(front, rcu);  } @@ -287,6 +289,8 @@ intel_frontbuffer_get(struct drm_gem_object *obj)  	if (!front)  		return NULL; +	drm_gem_object_get(obj); +  	front->obj = obj;  	kref_init(&front->ref);  	atomic_set(&front->bits, 0); @@ -299,8 +303,12 @@ intel_frontbuffer_get(struct drm_gem_object *obj)  	spin_lock(&display->fb_tracking.lock);  	cur = intel_bo_set_frontbuffer(obj, front);  	spin_unlock(&display->fb_tracking.lock); -	if (cur != front) + +	if (cur != front) { +		drm_gem_object_put(obj);  		kfree(front); +	} +  	return cur;  } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 01bf304c705f..10eb93a34cf2 100644 --- 
a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -3402,6 +3402,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)  	struct intel_display *display = to_intel_display(intel_dp);  	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { +		/* Selective fetch prior LNL */  		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {  			/* can we turn CFF off? */  			if (intel_dp->psr.busy_frontbuffer_bits == 0) @@ -3420,12 +3421,19 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)  		intel_psr_configure_full_frame_update(intel_dp);  		intel_psr_force_update(intel_dp); +	} else if (!intel_dp->psr.psr2_sel_fetch_enabled) { +		/* +		 * PSR1 on all platforms +		 * PSR2 HW tracking +		 * Panel Replay Full frame update +		 */ +		intel_psr_force_update(intel_dp);  	} else { +		/* Selective update LNL onwards */  		intel_psr_exit(intel_dp);  	} -	if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) && -	    !intel_dp->psr.busy_frontbuffer_bits) +	if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)  		queue_work(display->wq.unordered, &intel_dp->psr.work);  } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h index b6dc3d1b9bb1..b682969e3a29 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h @@ -89,12 +89,10 @@ i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,  	if (!front) {  		RCU_INIT_POINTER(obj->frontbuffer, NULL); -		drm_gem_object_put(intel_bo_to_drm_bo(obj));  	} else if (rcu_access_pointer(obj->frontbuffer)) {  		cur = rcu_dereference_protected(obj->frontbuffer, true);  		kref_get(&cur->ref);  	} else { -		drm_gem_object_get(intel_bo_to_drm_bo(obj));  		rcu_assign_pointer(obj->frontbuffer, front);  	} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 3e7e5badcc2b..2c651ec024ef 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -1325,9 +1325,16 @@ static int ct_receive(struct intel_guc_ct *ct)  static void ct_try_receive_message(struct intel_guc_ct *ct)  { +	struct intel_guc *guc = ct_to_guc(ct);  	int ret; -	if (GEM_WARN_ON(!ct->enabled)) +	if (!ct->enabled) { +		GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress); +		return; +	} + +	/* When interrupt disabled, message handling is not expected */ +	if (!guc->interrupts.enabled)  		return;  	ret = ct_receive(ct); diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 6d8325c76697..7fc6af703307 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -25,19 +25,18 @@  struct imx_parallel_display_encoder {  	struct drm_encoder encoder; -	struct drm_bridge bridge; -	struct imx_parallel_display *pd;  };  struct imx_parallel_display {  	struct device *dev;  	u32 bus_format;  	struct drm_bridge *next_bridge; +	struct drm_bridge bridge;  };  static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)  { -	return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; +	return container_of(b, struct imx_parallel_display, bridge);  }  static const u32 imx_pd_bus_fmts[] = { @@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)  	if (IS_ERR(imxpd_encoder))  		return PTR_ERR(imxpd_encoder); -	
imxpd_encoder->pd = imxpd;  	encoder = &imxpd_encoder->encoder; -	bridge = &imxpd_encoder->bridge; +	bridge = &imxpd->bridge;  	ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);  	if (ret)  		return ret; -	bridge->funcs = &imx_pd_bridge_funcs;  	drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);  	connector = drm_bridge_connector_init(drm, encoder); @@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)  	u32 bus_format = 0;  	const char *fmt; -	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); -	if (!imxpd) -		return -ENOMEM; +	imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge, +				      &imx_pd_bridge_funcs); +	if (IS_ERR(imxpd)) +		return PTR_ERR(imxpd);  	/* port@1 is the output port */  	imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); @@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, imxpd); +	devm_drm_bridge_add(dev, &imxpd->bridge); +  	return component_add(dev, &imx_pd_ops);  } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index eb5537f0ac90..31ff2922758a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -686,10 +686,6 @@ err_free:  	for (i = 0; i < private->data->mmsys_dev_num; i++)  		private->all_drm_private[i]->drm = NULL;  err_put_dev: -	for (i = 0; i < private->data->mmsys_dev_num; i++) { -		/* For device_find_child in mtk_drm_get_all_priv() */ -		put_device(private->all_drm_private[i]->dev); -	}  	put_device(private->mutex_dev);  	return ret;  } @@ -697,18 +693,12 @@ err_put_dev:  static void mtk_drm_unbind(struct device *dev)  {  	struct mtk_drm_private *private = dev_get_drvdata(dev); -	int i;  	/* for multi mmsys dev, unregister drm dev in mmsys master */  	if (private->drm_master) {  		drm_dev_unregister(private->drm);  		mtk_drm_kms_deinit(private->drm);  		drm_dev_put(private->drm); - -		for (i = 0; i < private->data->mmsys_dev_num; i++) { -			/* For device_find_child in mtk_drm_get_all_priv() */ -			put_device(private->all_drm_private[i]->dev); -		}  		put_device(private->mutex_dev);  	}  	private->mtk_drm_bound = false; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index fc62fef2fed8..4e6dc16e4a4c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)  	return true;  } +#define NEXT_BLK(blk) \ +	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size)) +  static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)  {  	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); @@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)  	for (blk = (const struct block_header *) fw_image->data;  	     (const u8*) blk < fw_image->data + fw_image->size; -	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) { +	     blk = NEXT_BLK(blk)) {  		if (blk->size == 0)  			continue; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index afaa3cfefd35..4b5a4edd0702 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,  	return 0;  } -static bool -adreno_smmu_has_prr(struct msm_gpu *gpu) -{ -	struct adreno_smmu_priv 
*adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); -	return adreno_smmu && adreno_smmu->set_prr_addr; -} -  int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,  		     uint32_t param, uint64_t *value, uint32_t *len)  { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4b970a59deaf..2f8156051d9b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,  	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,  							    dpu_kms->perf.perf_cfg); +	if (dpu_kms->catalog->caps->has_3d_merge) +		adjusted_mode_clk /= 2; +  	/*  	 * The given mode, adjusted for the perf clock factor, should not exceed  	 * the max core clock rate diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 6641455c4ec6..9f8d1bba9139 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {  		.base = 0x200, .len = 0xa0,}, \  	.csc_blk = {.name = "csc", \  		.base = 0x320, .len = 0x100,}, \ -	.format_list = plane_formats_yuv, \ -	.num_formats = ARRAY_SIZE(plane_formats_yuv), \ +	.format_list = plane_formats, \ +	.num_formats = ARRAY_SIZE(plane_formats), \  	.rotation_cfg = NULL, \  	} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index f54cf0faa1c7..905524ceeb1f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,  	int i;  	for (i = 0; i < DPU_MAX_PLANES; i++) { +		uint32_t w = src_w, h = src_h; +  		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) { -			src_w /= chroma_subsmpl_h; -			src_h /= chroma_subsmpl_v; +			w /= chroma_subsmpl_h; +			h /= chroma_subsmpl_v;  		} -		pixel_ext->num_ext_pxls_top[i] = src_h; -		pixel_ext->num_ext_pxls_left[i] = src_w; +		pixel_ext->num_ext_pxls_top[i] = h; +		pixel_ext->num_ext_pxls_left[i] = w;  	}  } @@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,  	 * We already have verified scaling against platform limitations.  	 * Now check if the SSPP supports scaling at all.  	 
*/ -	if (!sblk->scaler_blk.len && +	if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&  	    ((drm_rect_width(&new_plane_state->src) >> 16 !=  	      drm_rect_width(&new_plane_state->dst)) ||  	     (drm_rect_height(&new_plane_state->src) >> 16 != @@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,  							     state, plane_state,  							     prev_adjacent_plane_state);  		if (ret) -			break; +			return ret;  		prev_adjacent_plane_state = plane_state;  	} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 2c77c74fac0f..d9c3b0a1d091 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,  	if (!reqs->scale && !reqs->yuv)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA); -	if (!hw_sspp && reqs->scale) +	if (!hw_sspp && !reqs->yuv)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);  	if (!hw_sspp)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c index cd73468e369a..7545c0293efb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c @@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,  		DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",  			  fb->width, dpu_wb_conn->maxlinewidth);  		return -EINVAL; +	} else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { +		DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier); +		return -EINVAL;  	}  	return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index e391505fdaf0..3cbf08231492 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -109,7 +109,6 @@ struct msm_dsi_phy {  	struct msm_dsi_dphy_timing timing;  	const struct msm_dsi_phy_cfg *cfg;  	void *tuning_cfg; -	void *pll_data;  	enum msm_dsi_phy_usecase usecase;  	bool regulator_ldo_mode; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 32f06edd21a9..c5e1d2016bcc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)  	u32 data;  	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	if (pll->pll_enable_cnt++) { -		spin_unlock_irqrestore(&pll->pll_enable_lock, flags); -		WARN_ON(pll->pll_enable_cnt == INT_MAX); -		return; -	} +	pll->pll_enable_cnt++; +	WARN_ON(pll->pll_enable_cnt == INT_MAX);  	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);  	data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; @@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)  	spin_lock_init(&pll_7nm->pll_enable_lock);  	pll_7nm->phy = phy; -	phy->pll_data = pll_7nm;  	ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);  	if (ret) { @@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,  	u32 const delay_us = 5;  	u32 const timeout_us = 1000;  	struct msm_dsi_dphy_timing *timing = &phy->timing; -	struct dsi_pll_7nm *pll = phy->pll_data;  	void __iomem *base = phy->base;  	bool less_than_1500_mhz; -	unsigned long flags;  	u32 vreg_ctrl_0, 
vreg_ctrl_1, lane_ctrl0;  	u32 glbl_pemph_ctrl_0;  	u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; @@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,  		glbl_rescode_bot_ctrl = 0x3c;  	} -	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	pll->pll_enable_cnt = 1;  	/* de-assert digital and pll power down */  	data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |  	       DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;  	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0); -	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);  	/* Assert PLL core reset */  	writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL); @@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)  static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)  { -	struct dsi_pll_7nm *pll = phy->pll_data;  	void __iomem *base = phy->base; -	unsigned long flags;  	u32 data;  	DBG(""); @@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)  	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);  	writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0); -	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	pll->pll_enable_cnt = 0;  	/* Turn off all PHY blocks */  	writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0); -	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);  	/* make sure phy is turned off */  	wmb(); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 07d8cdd6bb2e..9f7fbe577abb 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)  		put_pages(obj);  	} -	if (obj->resv != &obj->_resv) { +	/* +	 * In error paths, we could end up here before msm_gem_new_handle() +	 * has changed obj->resv to point to the shared resv.  In this case, +	 * we don't want to drop a ref to the shared r_obj that we haven't +	 * taken yet. 
+	 */ +	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {  		struct drm_gem_object *r_obj =  			container_of(obj->resv, struct drm_gem_object, _resv); -		WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE)); -  		/* Drop reference we hold to shared resv obj: */  		drm_gem_object_put(r_obj);  	} diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3ab3b27134f9..75d9f3574370 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)  					 submit->user_fence,  					 DMA_RESV_USAGE_BOOKKEEP,  					 DMA_RESV_USAGE_BOOKKEEP); + +		last_fence = vm->last_fence; +		vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); +		dma_fence_put(last_fence); +  		return;  	} @@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)  			dma_resv_add_fence(obj->resv, submit->user_fence,  					   DMA_RESV_USAGE_READ);  	} - -	last_fence = vm->last_fence; -	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); -	dma_fence_put(last_fence);  }  static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 8316af1723c2..89a95977f41e 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -971,6 +971,7 @@ static int  lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)  {  	struct drm_device *dev = job->vm->drm; +	struct msm_drm_private *priv = dev->dev_private;  	int i = job->nr_ops++;  	int ret = 0; @@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)  		break;  	} +	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) && +	    !adreno_smmu_has_prr(priv->gpu)) { +		ret = UERR(EINVAL, dev, "PRR not supported\n"); +	} +  	return ret;  } @@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)  	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just  	 * immediately close the device file and all will be torn down.  	 
*/ -	if (to_msm_vm(ctx->vm)->unusable) +	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)  		return UERR(EPIPE, dev, "context is unusable");  	/* diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index a597f2bee30b..2894fc118485 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)  	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);  } +static inline bool +adreno_smmu_has_prr(struct msm_gpu *gpu) +{ +	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); + +	if (!adreno_smmu) +		return false; + +	return adreno_smmu && adreno_smmu->set_prr_addr; +} +  /* It turns out that all targets use the same ringbuffer size */  #define MSM_GPU_RINGBUFFER_SZ SZ_32K  #define MSM_GPU_RINGBUFFER_BLKSIZE 32 diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 0e18619f96cb..a188617653e8 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall  	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);  	if (ret != p->count) { +		kfree(p->pages); +		p->pages = NULL;  		p->count = ret;  		return -ENOMEM;  	} @@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo  	struct kmem_cache *pt_cache = get_pt_cache(mmu);  	uint32_t remaining_pt_count = p->count - p->ptr; +	if (!p->pages) +		return; +  	if (p->count > 0)  		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index e60f7892f5ce..a7bf539e5d86 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,  	return 0;  } +static bool +nouveau_sched_job_list_empty(struct nouveau_sched *sched) +{ +	bool empty; + +	spin_lock(&sched->job.list.lock); +	empty = list_empty(&sched->job.list.head); +	spin_unlock(&sched->job.list.lock); + +	return empty; +}  static void  nouveau_sched_fini(struct nouveau_sched *sched) @@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)  	struct drm_gpu_scheduler *drm_sched = &sched->base;  	struct drm_sched_entity *entity = &sched->entity; -	rmb(); /* for list_empty to work without lock */ -	wait_event(sched->job.wq, list_empty(&sched->job.list.head)); +	wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));  	drm_sched_entity_fini(entity);  	drm_sched_fini(drm_sched); diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 2fc7b0779b37..893af9b16756 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)  	dsi->lanes = 4;  	dsi->format = MIPI_DSI_FMT_RGB888;  	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | -			  MIPI_DSI_MODE_LPM; +			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;  	kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,  					   &kingdisplay_panel_funcs, diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 04d91929eedd..d5f821d6b23c 100644 --- 
a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {  	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,  }; +/* + * The mode data for this panel has been reverse engineered without access + * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all + * other panels results in garbage data on the display. + */  static const struct drm_display_mode t28cp45tn89_mode = {  	.clock = 6008,  	.hdisplay = 240, @@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {  	.vtotal = 320 + 8 + 4 + 4,  	.width_mm = 43,  	.height_mm = 57, -	.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC, +	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,  };  static const struct drm_display_mode et028013dma_mode = { diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 9bf06e55eaee..df767e82148a 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -1099,6 +1099,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)  	}  	panthor_job_irq_suspend(&ptdev->fw->irq); +	panthor_fw_stop(ptdev);  }  /** diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 6dec4354e378..7870e7dbaa5d 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)  		break;  	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: -		/* Partial unmaps might trigger a remap with either a prev or a next VA, -		 * but not both. +		/* Two VMAs can be needed for an unmap, as an unmap can happen +		 * in the middle of a drm_gpuva, requiring a remap with both +		 * prev & next VA. Or an unmap can span more than one drm_gpuva +		 * where the first and last ones are covered partially, requring +		 * a remap for the first with a prev VA and remap for the last +		 * with a next VA.  		 
*/ -		vma_count = 1; +		vma_count = 2;  		break;  	default: diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 88e821d67af7..9c8907bc61d9 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,  	ret = pci_enable_device(pdev);  	if (ret) -		goto err_free; +		return ret;  	pci_set_drvdata(pdev, ddev);  	ret = radeon_driver_load_kms(ddev, flags);  	if (ret) -		goto err_agp; +		goto err;  	ret = drm_dev_register(ddev, flags);  	if (ret) -		goto err_agp; +		goto err;  	if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))  		format = drm_format_info(DRM_FORMAT_C8); @@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,  	return 0; -err_agp: +err:  	pci_disable_device(pdev); -err_free: -	drm_dev_put(ddev);  	return ret;  }  static void -radeon_pci_remove(struct pci_dev *pdev) -{ -	struct drm_device *dev = pci_get_drvdata(pdev); - -	drm_put_dev(dev); -} - -static void  radeon_pci_shutdown(struct pci_dev *pdev)  { -	/* if we are running in a VM, make sure the device -	 * torn down properly on reboot/shutdown -	 */ -	if (radeon_device_is_virtual()) -		radeon_pci_remove(pdev); -  #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)  	/*  	 * Some adapters need to be suspended before a @@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {  	.name = DRIVER_NAME,  	.id_table = pciidlist,  	.probe = radeon_pci_probe, -	.remove = radeon_pci_remove,  	.shutdown = radeon_pci_shutdown,  	.driver.pm = &radeon_pm_ops,  }; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 645e33bf7947..ba1446acd703 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)  	rdev->agp = NULL;  done_free: -	kfree(rdev);  	dev->dev_private = NULL;  } diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 7b613997bb50..727cdf768161 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)  	regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,  		     FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) | -		     FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1)); +		     FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));  }  static enum drm_connector_status diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index b50927a824b4..7ec7bea5e38e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1031,7 +1031,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,  		return format;  	if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 || -	    drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) { +	    drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) {  		drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",  			drm_rect_width(src) >> 16, drm_rect_height(src) >> 16,  			drm_rect_width(dest), drm_rect_height(dest)); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 5a4697f636f2..c8e949f4a568 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -70,6 +70,7 @@ int 
drm_sched_entity_init(struct drm_sched_entity *entity,  	entity->guilty = guilty;  	entity->num_sched_list = num_sched_list;  	entity->priority = priority; +	entity->last_user = current->group_leader;  	/*  	 * It's perfectly valid to initialize an entity without having a valid  	 * scheduler attached. It's just not valid to use the scheduler before it @@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)  	/* For a killed process disallow further enqueueing of jobs. */  	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); -	if ((!last_user || last_user == current->group_leader) && +	if (last_user == current->group_leader &&  	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))  		drm_sched_entity_kill(entity); @@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)  		drm_sched_rq_remove_entity(entity->rq, entity);  		entity->rq = rq;  	} -	spin_unlock(&entity->lock);  	if (entity->num_sched_list == 1)  		entity->sched_list = NULL; + +	spin_unlock(&entity->lock);  }  /** diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 46119aacb809..c39f0245e3a9 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -965,13 +965,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,  	dma_resv_assert_held(resv);  	dma_resv_for_each_fence(&cursor, resv, usage, fence) { -		/* Make sure to grab an additional ref on the added fence */ -		dma_fence_get(fence); -		ret = drm_sched_job_add_dependency(job, fence); -		if (ret) { -			dma_fence_put(fence); +		/* +		 * As drm_sched_job_add_dependency always consumes the fence +		 * reference (even when it fails), and dma_resv_for_each_fence +		 * is not obtaining one, we need to grab one before calling. 
+		 */ +		ret = drm_sched_job_add_dependency(job, dma_fence_get(fence)); +		if (ret)  			return ret; -		}  	}  	return 0;  } diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 06cb6b02ec64..51f2a03847f9 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -342,6 +342,7 @@  #define POWERGATE_ENABLE			XE_REG(0xa210)  #define   RENDER_POWERGATE_ENABLE		REG_BIT(0)  #define   MEDIA_POWERGATE_ENABLE		REG_BIT(1) +#define   MEDIA_SAMPLERS_POWERGATE_ENABLE	REG_BIT(2)  #define   VDN_HCP_POWERGATE_ENABLE(n)		REG_BIT(3 + 2 * (n))  #define   VDN_MFXVDENC_POWERGATE_ENABLE(n)	REG_BIT(4 + 2 * (n)) diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index 69e2840c7ef0..663a79ec960d 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -66,6 +66,7 @@ KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc);  /**   * xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters + * @test: test context object   * @prev: the pointer to the previous parameter to iterate from or NULL   * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE   * @@ -242,6 +243,7 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);  /**   * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters + * @test: test context object   * @prev: the pointer to the previous parameter to iterate from or NULL   * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE   * @@ -266,6 +268,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);  /**   * xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters + * @test: test context object   * @prev: the pointer to the previous parameter to iterate from or NULL   * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE   * @@ -290,6 +293,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);  /**   * xe_pci_id_gen_param - Generate struct pci_device_id parameters + * @test: test context object   * @prev: the pointer to the previous parameter to iterate from or NULL   * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE   * @@ -376,6 +380,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);  /**   * xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters + * @test: test context object   * @prev: the previously returned value, or NULL for the first iteration   * @desc: the buffer for a parameter name   * diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index d5dbc51e8612..bc5b4c5fab81 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -182,7 +182,6 @@ int xe_bo_evict_all(struct xe_device *xe)  static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)  { -	struct xe_device *xe = xe_bo_device(bo);  	int ret;  	ret = xe_bo_restore_pinned(bo); @@ -201,13 +200,6 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)  		}  	} -	/* -	 * We expect validate to trigger a move VRAM and our move code -	 * should setup the iosys map. 
-	 */ -	xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) || -		  !iosys_map_is_null(&bo->vmap)); -  	return 0;  } diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 2883b39c9b37..34d33965eac2 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -1070,7 +1070,7 @@ void xe_device_l2_flush(struct xe_device *xe)  	spin_lock(>->global_invl_lock);  	xe_mmio_write32(>->mmio, XE2_GLOBAL_INVAL, 0x1); -	if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) +	if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true))  		xe_gt_err_once(gt, "Global invalidation timeout\n");  	spin_unlock(>->global_invl_lock); diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 7fdd0a97a628..5edc0cad47e2 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)  		ggtt->pt_ops = &xelp_pt_ops;  	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); +	if (!ggtt->wq) +		return -ENOMEM; +  	__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));  	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 3e0ad7e5b5df..6d3db5e55d98 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt)  	unsigned int fw_ref;  	int err; -	if (xe_device_wedged(gt_to_xe(gt))) -		return -ECANCELED; +	if (xe_device_wedged(gt_to_xe(gt))) { +		err = -ECANCELED; +		goto err_pm_put; +	}  	/* We only support GT resets with GuC submission */ -	if (!xe_device_uc_enabled(gt_to_xe(gt))) -		return -ENODEV; +	if (!xe_device_uc_enabled(gt_to_xe(gt))) { +		err = -ENODEV; +		goto err_pm_put; +	}  	xe_gt_info(gt, "reset started\n"); @@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt)  	if (!err)  		xe_gt_warn(gt, "reset block failed to get lifted"); -	xe_pm_runtime_get(gt_to_xe(gt)); -  	if (xe_fault_inject_gt_reset()) {  		err = -ECANCELED;  		goto err_fail; @@ -874,6 +876,7 @@ err_fail:  	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));  	xe_device_declare_wedged(gt_to_xe(gt)); +err_pm_put:  	xe_pm_runtime_put(gt_to_xe(gt));  	return err; @@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt)  		return;  	xe_gt_info(gt, "reset queued\n"); -	queue_work(gt->ordered_wq, >->reset.worker); +	xe_pm_runtime_get_noresume(gt_to_xe(gt)); +	if (!queue_work(gt->ordered_wq, >->reset.worker)) +		xe_pm_runtime_put(gt_to_xe(gt));  }  void xe_gt_suspend_prepare(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index f8950a52d0a4..bdc9d9877ec4 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -124,6 +124,9 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)  	if (xe_gt_is_main_type(gt))  		gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE; +	if (MEDIA_VERx100(xe) >= 1100 && MEDIA_VERx100(xe) < 1255) +		gtidle->powergate_enable |= MEDIA_SAMPLERS_POWERGATE_ENABLE; +  	if (xe->info.platform != XE_DG1) {  		for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {  			if ((gt->info.engine_mask & BIT(i))) @@ -246,6 +249,11 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)  				drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n,  					   str_up_down(pg_status & media_slices[n].status_bit));  	} + +	if (MEDIA_VERx100(xe) >= 1100 && MEDIA_VERx100(xe) < 1255) +		drm_printf(p, "Media 
Samplers Power Gating Enabled: %s\n", +			   str_yes_no(pg_enabled & MEDIA_SAMPLERS_POWERGATE_ENABLE)); +  	return 0;  } diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 53024eb5670b..94ed8159496f 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -44,6 +44,7 @@  #include "xe_ring_ops_types.h"  #include "xe_sched_job.h"  #include "xe_trace.h" +#include "xe_uc_fw.h"  #include "xe_vm.h"  static struct xe_guc * @@ -1489,7 +1490,17 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)  	xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));  	trace_xe_exec_queue_cleanup_entity(q); -	if (exec_queue_registered(q)) +	/* +	 * Expected state transitions for cleanup: +	 * - If the exec queue is registered and GuC firmware is running, we must first +	 *   disable scheduling and deregister the queue to ensure proper teardown and +	 *   resource release in the GuC, then destroy the exec queue on driver side. +	 * - If the GuC is already stopped (e.g., during driver unload or GPU reset), +	 *   we cannot expect a response for the deregister request. In this case, +	 *   it is safe to directly destroy the exec queue on driver side, as the GuC +	 *   will not process further requests and all resources must be cleaned up locally. +	 */ +	if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw))  		disable_scheduling_deregister(guc, q);  	else  		__guc_exec_queue_destroy(guc, q); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 1d667fa36cf3..a36ce7dce8cc 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -434,7 +434,7 @@ int xe_migrate_init(struct xe_migrate *m)  	err = xe_migrate_lock_prepare_vm(tile, m, vm);  	if (err) -		return err; +		goto err_out;  	if (xe->info.has_usm) {  		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt, @@ -2113,7 +2113,9 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,  		if (current_bytes & ~PAGE_MASK) {  			int pitch = 4; -			current_bytes = min_t(int, current_bytes, S16_MAX * pitch); +			current_bytes = min_t(int, current_bytes, +					      round_down(S16_MAX * pitch, +							 XE_CACHELINE_BYTES));  		}  		__fence = xe_migrate_vram(m, current_bytes, diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index be91343829dd..9a6df79fc5b6 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -867,6 +867,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	if (err)  		return err; +	xe_vram_resize_bar(xe); +  	err = xe_device_probe_early(xe);  	/*  	 * In Boot Survivability mode, no drm card is exposed and driver diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index a1c88f9a6c76..07f96bda638a 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,  	case DRM_GPUVA_OP_MAP:  		if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&  		     !op->map.invalidate_on_bind) || -		    op->map.is_cpu_addr_mirror) +		    (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))  			break;  		err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma, @@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,  	switch (op->base.op) {  	case DRM_GPUVA_OP_MAP:  		if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) || -		    op->map.is_cpu_addr_mirror) +		    (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))  			
break;  		bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence, diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 7e2db71ff34e..129e7818565c 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64  	if (!vma)  		return -EINVAL; +	if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) { +		drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n"); +		return 0; +	} +  	if (xe_vma_has_default_mem_attrs(vma))  		return 0; @@ -1034,6 +1039,9 @@ retry:  	if (err)  		return err; +	dpagemap = xe_vma_resolve_pagemap(vma, tile); +	if (!dpagemap && !ctx.devmem_only) +		ctx.device_private_page_owner = NULL;  	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);  	if (IS_ERR(range)) @@ -1054,7 +1062,6 @@ retry:  	range_debug(range, "PAGE FAULT"); -	dpagemap = xe_vma_resolve_pagemap(vma, tile);  	if (--migrate_try_count >= 0 &&  	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {  		ktime_t migrate_start = xe_svm_stats_ktime_get(); @@ -1073,7 +1080,17 @@ retry:  				drm_dbg(&vm->xe->drm,  					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",  					vm->usm.asid, ERR_PTR(err)); -				goto retry; + +				/* +				 * In the devmem-only case, mixed mappings may +				 * be found. The get_pages function will fix +				 * these up to a single location, allowing the +				 * page fault handler to make forward progress. +				 */ +				if (ctx.devmem_only) +					goto get_pages; +				else +					goto retry;  			} else {  				drm_err(&vm->xe->drm,  					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n", @@ -1083,6 +1100,7 @@ retry:  		}  	} +get_pages:  	get_pages_start = xe_svm_stats_ktime_get();  	range_debug(range, "GET PAGES"); diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h index fec331d791e7..b2d09c596714 100644 --- a/drivers/gpu/drm/xe/xe_validation.h +++ b/drivers/gpu/drm/xe/xe_validation.h @@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val)   */  DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,  	     if (_T) xe_validation_ctx_fini(_T);, -	     ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); -	       _ret ? NULL : _ctx; }), +	     ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); +	       *_ret ? NULL : _ctx; }),  	     struct xe_validation_ctx *_ctx, struct xe_validation_device *_val, -	     struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret); +	     struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);  static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)  {return *_T; }  #define class_xe_validation_is_conditional true @@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)   * exhaustive eviction.   
*/  #define xe_validation_guard(_ctx, _val, _exec, _flags, _ret)		\ -	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \ +	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \  	drm_exec_until_all_locked(_exec)  #endif diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 027e6ce648c5..63c65e3d207b 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,  			vops->pt_update_ops[i].num_ops += inc_val;  } +#define XE_VMA_CREATE_MASK (		    \ +	XE_VMA_READ_ONLY |		    \ +	XE_VMA_DUMPABLE |		    \ +	XE_VMA_SYSTEM_ALLOCATOR |           \ +	DRM_GPUVA_SPARSE |		    \ +	XE_VMA_MADV_AUTORESET) +  static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,  				  u8 tile_mask)  { @@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,  	op->base.map.gem.offset = vma->gpuva.gem.offset;  	op->map.vma = vma;  	op->map.immediate = true; -	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE; -	op->map.is_null = xe_vma_is_null(vma); +	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;  }  static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, @@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)  		kfree(vma);  } -#define VMA_CREATE_FLAG_READ_ONLY		BIT(0) -#define VMA_CREATE_FLAG_IS_NULL			BIT(1) -#define VMA_CREATE_FLAG_DUMPABLE		BIT(2) -#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR	BIT(3) -  static struct xe_vma *xe_vma_create(struct xe_vm *vm,  				    struct xe_bo *bo,  				    u64 bo_offset_or_userptr, @@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,  	struct xe_vma *vma;  	struct xe_tile *tile;  	u8 id; -	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY); -	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL); -	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE); -	bool is_cpu_addr_mirror = -		(flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR); +	bool is_null = (flags & DRM_GPUVA_SPARSE); +	bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);  	xe_assert(vm->xe, start < end);  	xe_assert(vm->xe, end < vm->size); @@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,  		if (!vma)  			return ERR_PTR(-ENOMEM); -		if (is_cpu_addr_mirror) -			vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR; -		if (is_null) -			vma->gpuva.flags |= DRM_GPUVA_SPARSE;  		if (bo)  			vma->gpuva.gem.obj = &bo->ttm.base;  	} @@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,  	vma->gpuva.vm = &vm->gpuvm;  	vma->gpuva.va.addr = start;  	vma->gpuva.va.range = end - start + 1; -	if (read_only) -		vma->gpuva.flags |= XE_VMA_READ_ONLY; -	if (dumpable) -		vma->gpuva.flags |= XE_VMA_DUMPABLE; +	vma->gpuva.flags = flags;  	for_each_tile(tile, vm->xe, id)  		vma->tile_mask |= 0x1 << id; @@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,  		if (__op->op == DRM_GPUVA_OP_MAP) {  			op->map.immediate =  				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE; -			op->map.read_only = -				flags & DRM_XE_VM_BIND_FLAG_READONLY; -			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; -			op->map.is_cpu_addr_mirror = flags & -				DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR; -			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE; +			if (flags & DRM_XE_VM_BIND_FLAG_READONLY) +				op->map.vma_flags |= XE_VMA_READ_ONLY; +			if (flags & DRM_XE_VM_BIND_FLAG_NULL) +				op->map.vma_flags |= DRM_GPUVA_SPARSE; 
+			if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) +				op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR; +			if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE) +				op->map.vma_flags |= XE_VMA_DUMPABLE; +			if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) +				op->map.vma_flags |= XE_VMA_MADV_AUTORESET;  			op->map.pat_index = pat_index;  			op->map.invalidate_on_bind =  				__xe_vm_needs_clear_scratch_pages(vm, flags); @@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,  				.pat_index = op->map.pat_index,  			}; -			flags |= op->map.read_only ? -				VMA_CREATE_FLAG_READ_ONLY : 0; -			flags |= op->map.is_null ? -				VMA_CREATE_FLAG_IS_NULL : 0; -			flags |= op->map.dumpable ? -				VMA_CREATE_FLAG_DUMPABLE : 0; -			flags |= op->map.is_cpu_addr_mirror ? -				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0; +			flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;  			vma = new_vma(vm, &op->base.map, &default_attr,  				      flags); @@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,  			op->map.vma = vma;  			if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) && -			     !op->map.is_cpu_addr_mirror) || +			     !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||  			    op->map.invalidate_on_bind)  				xe_vma_ops_incr_pt_update_ops(vops,  							      op->tile_mask, 1); @@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,  			op->remap.start = xe_vma_start(old);  			op->remap.range = xe_vma_size(old); -			flags |= op->base.remap.unmap->va->flags & -				XE_VMA_READ_ONLY ? -				VMA_CREATE_FLAG_READ_ONLY : 0; -			flags |= op->base.remap.unmap->va->flags & -				DRM_GPUVA_SPARSE ? -				VMA_CREATE_FLAG_IS_NULL : 0; -			flags |= op->base.remap.unmap->va->flags & -				XE_VMA_DUMPABLE ? -				VMA_CREATE_FLAG_DUMPABLE : 0; -			flags |= xe_vma_is_cpu_addr_mirror(old) ? -				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0; - +			flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;  			if (op->base.remap.prev) {  				vma = new_vma(vm, op->base.remap.prev,  					      &old->attr, flags); @@ -2832,7 +2809,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,  }  static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma, -				 bool validate) +				 bool res_evict, bool validate)  {  	struct xe_bo *bo = xe_vma_bo(vma);  	struct xe_vm *vm = xe_vma_vm(vma); @@ -2843,7 +2820,8 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,  			err = drm_exec_lock_obj(exec, &bo->ttm.base);  		if (!err && validate)  			err = xe_bo_validate(bo, vm, -					     !xe_vm_in_preempt_fence_mode(vm), exec); +					     !xe_vm_in_preempt_fence_mode(vm) && +					     res_evict, exec);  	}  	return err; @@ -2913,14 +2891,23 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)  }  static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, -			    struct xe_vma_op *op) +			    struct xe_vma_ops *vops, struct xe_vma_op *op)  {  	int err = 0; +	bool res_evict; + +	/* +	 * We only allow evicting a BO within the VM if it is not part of an +	 * array of binds, as an array of binds can evict another BO within the +	 * bind. 
+	 */ +	res_evict = !(vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS);  	switch (op->base.op) {  	case DRM_GPUVA_OP_MAP:  		if (!op->map.invalidate_on_bind)  			err = vma_lock_and_validate(exec, op->map.vma, +						    res_evict,  						    !xe_vm_in_fault_mode(vm) ||  						    op->map.immediate);  		break; @@ -2931,11 +2918,13 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,  		err = vma_lock_and_validate(exec,  					    gpuva_to_vma(op->base.remap.unmap->va), -					    false); +					    res_evict, false);  		if (!err && op->remap.prev) -			err = vma_lock_and_validate(exec, op->remap.prev, true); +			err = vma_lock_and_validate(exec, op->remap.prev, +						    res_evict, true);  		if (!err && op->remap.next) -			err = vma_lock_and_validate(exec, op->remap.next, true); +			err = vma_lock_and_validate(exec, op->remap.next, +						    res_evict, true);  		break;  	case DRM_GPUVA_OP_UNMAP:  		err = check_ufence(gpuva_to_vma(op->base.unmap.va)); @@ -2944,7 +2933,7 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,  		err = vma_lock_and_validate(exec,  					    gpuva_to_vma(op->base.unmap.va), -					    false); +					    res_evict, false);  		break;  	case DRM_GPUVA_OP_PREFETCH:  	{ @@ -2959,7 +2948,7 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,  		err = vma_lock_and_validate(exec,  					    gpuva_to_vma(op->base.prefetch.va), -					    false); +					    res_evict, false);  		if (!err && !xe_vma_has_no_bo(vma))  			err = xe_bo_migrate(xe_vma_bo(vma),  					    region_to_mem_type[region], @@ -3005,7 +2994,7 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,  		return err;  	list_for_each_entry(op, &vops->list, link) { -		err = op_lock_and_prep(exec, vm, op); +		err = op_lock_and_prep(exec, vm, vops, op);  		if (err)  			return err;  	} @@ -3267,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);  	 DRM_XE_VM_BIND_FLAG_NULL | \  	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \  	 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \ -	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) +	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \ +	 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)  #ifdef TEST_VM_OPS_ERROR  #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR) @@ -3382,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,  		    XE_IOCTL_DBG(xe,  (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&  				       !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||  		    XE_IOCTL_DBG(xe, obj && -				 op == DRM_XE_VM_BIND_OP_UNMAP)) { +				 op == DRM_XE_VM_BIND_OP_UNMAP) || +		    XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) && +				 (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {  			err = -EINVAL;  			goto free_bind_ops;  		} @@ -3638,6 +3630,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)  	}  	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); +	if (args->num_binds > 1) +		vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS;  	for (i = 0; i < args->num_binds; ++i) {  		u64 range = bind_ops[i].range;  		u64 addr = bind_ops[i].addr; @@ -4198,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,  	struct xe_vma_ops vops;  	struct drm_gpuva_ops *ops = NULL;  	struct drm_gpuva_op *__op; -	bool is_cpu_addr_mirror = false; +	unsigned int vma_flags = 0;  	bool remap_op = false;  	struct xe_vma_mem_attr tmp_attr;  	u16 default_pat; @@ -4228,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,  				vma = gpuva_to_vma(op->base.unmap.va);  				
XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));  				default_pat = vma->attr.default_pat_index; +				vma_flags = vma->gpuva.flags;  			}  			if (__op->op == DRM_GPUVA_OP_REMAP) {  				vma = gpuva_to_vma(op->base.remap.unmap->va);  				default_pat = vma->attr.default_pat_index; +				vma_flags = vma->gpuva.flags;  			}  			if (__op->op == DRM_GPUVA_OP_MAP) { -				op->map.is_cpu_addr_mirror = true; +				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;  				op->map.pat_index = default_pat;  			}  		} else { @@ -4245,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,  				xe_assert(vm->xe, !remap_op);  				xe_assert(vm->xe, xe_vma_has_no_bo(vma));  				remap_op = true; - -				if (xe_vma_is_cpu_addr_mirror(vma)) -					is_cpu_addr_mirror = true; -				else -					is_cpu_addr_mirror = false; +				vma_flags = vma->gpuva.flags;  			}  			if (__op->op == DRM_GPUVA_OP_MAP) { @@ -4258,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,  				/*  				 * In case of madvise ops DRM_GPUVA_OP_MAP is  				 * always after DRM_GPUVA_OP_REMAP, so ensure -				 * we assign op->map.is_cpu_addr_mirror true -				 * if REMAP is for xe_vma_is_cpu_addr_mirror vma +				 * to propagate the flags from the vma we're +				 * unmapping.  				 */ -				op->map.is_cpu_addr_mirror = is_cpu_addr_mirror; +				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;  			}  		}  		print_op(vm->xe, __op); diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index da39940501d8..d6e2a0fdd4b3 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op;  #define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 7)  #define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 8)  #define XE_VMA_SYSTEM_ALLOCATOR	(DRM_GPUVA_USERBITS << 9) +#define XE_VMA_MADV_AUTORESET	(DRM_GPUVA_USERBITS << 10)  /**   * struct xe_vma_mem_attr - memory attributes associated with vma @@ -345,17 +346,10 @@ struct xe_vm {  struct xe_vma_op_map {  	/** @vma: VMA to map */  	struct xe_vma *vma; +	unsigned int vma_flags;  	/** @immediate: Immediate bind */  	bool immediate;  	/** @read_only: Read only */ -	bool read_only; -	/** @is_null: is NULL binding */ -	bool is_null; -	/** @is_cpu_addr_mirror: is CPU address mirror binding */ -	bool is_cpu_addr_mirror; -	/** @dumpable: whether BO is dumped on GPU hang */ -	bool dumpable; -	/** @invalidate: invalidate the VMA before bind */  	bool invalidate_on_bind;  	/** @pat_index: The pat index to use for this operation. */  	u16 pat_index; @@ -476,6 +470,7 @@ struct xe_vma_ops {  	/** @flag: signify the properties within xe_vma_ops*/  #define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)  #define XE_VMA_OPS_FLAG_MADVISE          BIT(1) +#define XE_VMA_OPS_ARRAY_OF_BINDS	 BIT(2)  	u32 flags;  #ifdef TEST_VM_OPS_ERROR  	/** @inject_error: inject error to test error handling */ diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index b44ebf50fedb..652df7a5f4f6 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -26,15 +26,35 @@  #define BAR_SIZE_SHIFT 20 -static void -_resize_bar(struct xe_device *xe, int resno, resource_size_t size) +/* + * Release all the BARs that could influence/block LMEMBAR resizing, i.e. 
+ * assigned IORESOURCE_MEM_64 BARs + */ +static void release_bars(struct pci_dev *pdev) +{ +	struct resource *res; +	int i; + +	pci_dev_for_each_resource(pdev, res, i) { +		/* Resource already un-assigned, do not reset it */ +		if (!res->parent) +			continue; + +		/* No need to release unrelated BARs */ +		if (!(res->flags & IORESOURCE_MEM_64)) +			continue; + +		pci_release_resource(pdev, i); +	} +} + +static void resize_bar(struct xe_device *xe, int resno, resource_size_t size)  {  	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);  	int bar_size = pci_rebar_bytes_to_size(size);  	int ret; -	if (pci_resource_len(pdev, resno)) -		pci_release_resource(pdev, resno); +	release_bars(pdev);  	ret = pci_resize_resource(pdev, resno, bar_size);  	if (ret) { @@ -50,7 +70,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)   * if force_vram_bar_size is set, attempt to set to the requested size   * else set to maximum possible size   */ -static void resize_vram_bar(struct xe_device *xe) +void xe_vram_resize_bar(struct xe_device *xe)  {  	int force_vram_bar_size = xe_modparam.force_vram_bar_size;  	struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -119,7 +139,7 @@ static void resize_vram_bar(struct xe_device *xe)  	pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);  	pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_MEMORY); -	_resize_bar(xe, LMEM_BAR, rebar_size); +	resize_bar(xe, LMEM_BAR, rebar_size);  	pci_assign_unassigned_bus_resources(pdev->bus);  	pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd); @@ -148,8 +168,6 @@ static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region *  		return -ENXIO;  	} -	resize_vram_bar(xe); -  	lmem_bar->io_start = pci_resource_start(pdev, LMEM_BAR);  	lmem_bar->io_size = pci_resource_len(pdev, LMEM_BAR);  	if (!lmem_bar->io_size) diff --git a/drivers/gpu/drm/xe/xe_vram.h b/drivers/gpu/drm/xe/xe_vram.h index 72860f714fc6..13505cfb184d 100644 --- a/drivers/gpu/drm/xe/xe_vram.h +++ b/drivers/gpu/drm/xe/xe_vram.h @@ -11,6 +11,7 @@  struct xe_device;  struct xe_vram_region; +void xe_vram_resize_bar(struct xe_device *xe);  int xe_vram_probe(struct xe_device *xe);  struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement);  | 
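
Illustrative sketch (not part of the patch above): the xe_vm.c and xe_vm_types.h hunks replace the per-property booleans in struct xe_vma_op_map (read_only, is_null, dumpable, is_cpu_addr_mirror) with a single vma_flags word that is filtered through XE_VMA_CREATE_MASK whenever a new mapping inherits attributes from an existing VMA. The stand-alone C program below shows the same "flags word + create mask" pattern; all DEMO_* names are made up for the example and are not the driver's API.

/*
 * Illustrative only -- not kernel code. Per-VMA booleans become bits in
 * one word, and only the bits covered by a create mask are propagated
 * when a new mapping inherits from an old one.
 */
#include <stdio.h>

#define DEMO_VMA_READ_ONLY        (1u << 0)
#define DEMO_VMA_IS_NULL          (1u << 1)
#define DEMO_VMA_DUMPABLE         (1u << 2)
#define DEMO_VMA_SYSTEM_ALLOCATOR (1u << 3)
#define DEMO_VMA_MADV_AUTORESET   (1u << 4)
/* Internal bookkeeping bit that must never leak into a new mapping. */
#define DEMO_VMA_PTE_COMPACT      (1u << 5)

/* Only these bits may be copied from an existing VMA to a new one. */
#define DEMO_VMA_CREATE_MASK						\
	(DEMO_VMA_READ_ONLY | DEMO_VMA_IS_NULL | DEMO_VMA_DUMPABLE |	\
	 DEMO_VMA_SYSTEM_ALLOCATOR | DEMO_VMA_MADV_AUTORESET)

struct demo_vma {
	unsigned int flags;
};

/* New style: mask the source flags once and OR them in, instead of
 * copying four separate booleans field by field. */
static void demo_inherit_flags(struct demo_vma *new_vma,
			       const struct demo_vma *old_vma)
{
	new_vma->flags |= old_vma->flags & DEMO_VMA_CREATE_MASK;
}

int main(void)
{
	struct demo_vma old_vma = {
		.flags = DEMO_VMA_DUMPABLE | DEMO_VMA_SYSTEM_ALLOCATOR |
			 DEMO_VMA_PTE_COMPACT,
	};
	struct demo_vma new_vma = { .flags = 0 };

	demo_inherit_flags(&new_vma, &old_vma);

	/* PTE_COMPACT is filtered out; only creation-time bits survive. */
	printf("new flags: 0x%x\n", new_vma.flags);
	return 0;
}

Filtering through a single mask keeps internal bookkeeping bits (here DEMO_VMA_PTE_COMPACT) from leaking into newly created mappings while still propagating every creation-time property in one assignment, which is what the consolidated "flags |= op->map.vma_flags & XE_VMA_CREATE_MASK" lines above rely on.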
