diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf5d5ad5bbe9..68a260e4f4c7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6416,6 +6416,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	pte_t new_pte;
 	bool new_folio, new_pagecache_folio = false;
 	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
+	bool folio_locked = true;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -6581,6 +6582,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+		/*
+		 * No need to keep file folios locked. See comment in
+		 * hugetlb_fault().
+		 */
+		if (!anon_rmap) {
+			folio_locked = false;
+			folio_unlock(folio);
+		}
 		/* Optimization, do the COW without a second fault */
 		ret = hugetlb_wp(vmf);
 	}
@@ -6595,7 +6604,8 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	if (new_folio)
 		folio_set_hugetlb_migratable(folio);
 
-	folio_unlock(folio);
+	if (folio_locked)
+		folio_unlock(folio);
 out:
 	hugetlb_vma_unlock_read(vma);
 
@@ -6783,15 +6793,20 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(vmf.orig_pte)) {
-			/* hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) */
+			/*
+			 * Anonymous folios need to be locked since hugetlb_wp()
+			 * checks whether we can re-use the folio exclusively
+			 * in case we are the only user of it.
+			 */
 			folio = page_folio(pte_page(vmf.orig_pte));
-			if (!folio_trylock(folio)) {
+			if (folio_test_anon(folio) && !folio_trylock(folio)) {
 				need_wait_lock = true;
 				goto out_ptl;
 			}
 			folio_get(folio);
 			ret = hugetlb_wp(&vmf);
-			folio_unlock(folio);
+			if (folio_test_anon(folio))
+				folio_unlock(folio);
 			folio_put(folio);
 			goto out_ptl;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
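
For reference, the pattern both functions now implement is: take the folio
lock only when the folio is anonymous, since only the exclusive re-use
check in hugetlb_wp() depends on it, and drop the lock under the same
condition. Below is a minimal userspace sketch of that pattern; every
name and type in it is a stand-in for illustration, not a kernel API
(a real folio lock is a sleeping bit-lock, not a pthread mutex).

/*
 * Standalone sketch of the conditional folio-locking pattern from the
 * patch above. All helpers and types here are stand-ins, not kernel
 * APIs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct folio_stub {
	bool anon;		/* anonymous vs. file-backed */
	pthread_mutex_t lock;	/* stand-in for the folio lock */
};

/* Stand-in for hugetlb_wp(): only anon folios need the lock held. */
static void wp_stub(struct folio_stub *f)
{
	if (f->anon)
		printf("anon folio: exclusive re-use check runs locked\n");
	else
		printf("file folio: always copies, lock not required\n");
}

static void fault_path_stub(struct folio_stub *f)
{
	bool folio_locked = false;

	/* Lock only anonymous folios, as hugetlb_fault() now does. */
	if (f->anon) {
		pthread_mutex_lock(&f->lock);
		folio_locked = true;
	}

	wp_stub(f);

	/* Unlock under the same condition, mirroring the folio_locked
	 * flag in hugetlb_no_page() above. */
	if (folio_locked)
		pthread_mutex_unlock(&f->lock);
}

int main(void)
{
	struct folio_stub anon = { true, PTHREAD_MUTEX_INITIALIZER };
	struct folio_stub file = { false, PTHREAD_MUTEX_INITIALIZER };

	fault_path_stub(&anon);
	fault_path_stub(&file);
	return 0;
}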