mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 04:21:09 -04:00
drm/amdkfd: Make all TLB-flushes heavy-weight
With only one sequence number we cannot track the need for legacy vs heavy-weight flushes reliably. Always use heavy-weight. Signed-off-by: Felix Kuehling <felix.kuehling@amd.com> Reviewed-by: Philip Yang <philip.yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> (cherry picked from commit c1a3ff1d327820cd9a52bc1056b98681fc088949) Cc: stable@vger.kernel.org
This commit is contained in: (branch list omitted)
Committed by: Alex Deucher
Parent commit: f0997a06e3
Commit: 9b4e3495d1
@@ -1360,7 +1360,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
 		if (WARN_ON_ONCE(!peer_pdd))
 			continue;
-		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
+		kfd_flush_tlb(peer_pdd);
 	}
 	kfree(devices_arr);
 
@@ -1455,7 +1455,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 		if (WARN_ON_ONCE(!peer_pdd))
 			continue;
 		if (flush_tlb)
-			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
+			kfd_flush_tlb(peer_pdd);
 
 		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
 		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
@@ -572,7 +572,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
 					qpd->vmid,
 					qpd->page_table_base);
 	/* invalidate the VM context after pasid and vmid mapping is set up */
-	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
+	kfd_flush_tlb(qpd_to_pdd(qpd));
 
 	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
 		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
@@ -610,7 +610,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 	if (flush_texture_cache_nocpsch(q->device, qpd))
 		dev_err(dev, "Failed to flush TC\n");
 
-	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
+	kfd_flush_tlb(qpd_to_pdd(qpd));
 
 	/* Release the vmid mapping */
 	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
@@ -1284,7 +1284,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 				dqm->dev->adev,
 				qpd->vmid,
 				qpd->page_table_base);
-		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
+		kfd_flush_tlb(pdd);
 	}
 
 	/* Take a safe reference to the mm_struct, which may otherwise
@@ -1554,13 +1554,13 @@ void kfd_signal_reset_event(struct kfd_node *dev);
 void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
 void kfd_signal_process_terminate_event(struct kfd_process *p);
 
-static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
-				 enum TLB_FLUSH_TYPE type)
+static inline void kfd_flush_tlb(struct kfd_process_device *pdd)
 {
 	struct amdgpu_device *adev = pdd->dev->adev;
 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
 
-	amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
+	amdgpu_vm_flush_compute_tlb(adev, vm, TLB_FLUSH_HEAVYWEIGHT,
+				    pdd->dev->xcc_mask);
 }
 
 static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
@@ -1424,7 +1424,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
 			if (r)
 				break;
 		}
-		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
+		kfd_flush_tlb(pdd);
 	}
 
 	return r;
@@ -1571,7 +1571,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 			}
 		}
 
-		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
+		kfd_flush_tlb(pdd);
 	}
 
 	return r;
Reference in New Issue
Block a user