diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index cefa50eaf7a4..1ab2bbd3860c 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -598,6 +598,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	if (ret != VM_FAULT_NOPAGE)
 		ret = vmf_insert_pfn(vma, vmf->address, pfn);
 
+	if (ret == VM_FAULT_NOPAGE)
+		folio_mark_accessed(folio);
+
 out:
 	dma_resv_unlock(obj->resv);
 
@@ -638,10 +641,29 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 	drm_gem_vm_close(vma);
 }
 
+static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	loff_t num_pages = obj->size >> PAGE_SHIFT;
+	pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+	if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+		return VM_FAULT_SIGBUS;
+
+	file_update_time(vma->vm_file);
+
+	folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+
+	return 0;
+}
+
 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
 	.fault = drm_gem_shmem_fault,
 	.open = drm_gem_shmem_vm_open,
 	.close = drm_gem_shmem_vm_close,
+	.pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
 };
 EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);