@@ -207,10 +207,12 @@ static void drm_pagemap_get_devmem_page(struct page *page,
 /**
  * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
  * @dev: The device for which the pages are being mapped
+ * @local_dpagemap: The drm_pagemap pointer of the local drm_pagemap.
  * @pagemap_addr: Array to store DMA information corresponding to mapped pages
  * @migrate_pfn: Array of migrate page frame numbers to map
  * @npages: Number of pages to map
  * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ * @mdetails: Details governing the migration behaviour.
  *
  * This function maps pages of memory for migration usage in GPU SVM. It
  * iterates over each page frame number provided in @migrate_pfn, maps the
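The struct drm_pagemap_migrate_details type behind the new @mdetails parameter is not part of this excerpt. A minimal sketch, assuming only the three fields the hunks below actually dereference (timeslice_ms, can_migrate_same_pagemap and source_peer_migrates); the real definition and any further fields live in the drm_pagemap headers:

    /* Sketch only -- field types and comments are assumptions drawn from the call sites below. */
    struct drm_pagemap_migrate_details {
            /* Time in ms the migrated pages are requested to stay resident before migrating back. */
            unsigned long timeslice_ms;
            /* Allow selecting source pages that already sit in the local (destination) drm_pagemap. */
            bool can_migrate_same_pagemap;
            /* If set, device-private source pages are left to their owning peer pagemap to migrate. */
            bool source_peer_migrates;
    };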
@@ -220,12 +222,15 @@ static void drm_pagemap_get_devmem_page(struct page *page,
  * Returns: 0 on success, -EFAULT if an error occurs during mapping.
  */
 static int drm_pagemap_migrate_map_pages(struct device *dev,
+					 struct drm_pagemap *local_dpagemap,
 					 struct drm_pagemap_addr *pagemap_addr,
 					 unsigned long *migrate_pfn,
 					 unsigned long npages,
-					 enum dma_data_direction dir)
+					 enum dma_data_direction dir,
+					 const struct drm_pagemap_migrate_details *mdetails)
 {
 	unsigned long i;
+	unsigned long num_peer_pages = 0;
 
 	for (i = 0; i < npages;) {
 		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
@@ -236,31 +241,50 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 		if (!page)
 			goto next;
 
-		if (WARN_ON_ONCE(is_zone_device_page(page)))
-			return -EFAULT;
-
 		folio = page_folio(page);
 		order = folio_order(folio);
 
-		dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
-		if (dma_mapping_error(dev, dma_addr))
-			return -EFAULT;
+		if (is_device_private_page(page)) {
+			struct drm_pagemap_zdd *zdd = page->zone_device_data;
+			struct drm_pagemap *dpagemap = zdd->dpagemap;
+			struct drm_pagemap_addr addr;
 
-		pagemap_addr[i] =
-			drm_pagemap_addr_encode(dma_addr,
-						DRM_INTERCONNECT_SYSTEM,
-						order, dir);
+			if (dpagemap == local_dpagemap && !mdetails->can_migrate_same_pagemap)
+				goto next;
+
+			num_peer_pages += NR_PAGES(order);
+			addr = dpagemap->ops->device_map(dpagemap, dev, page, order, dir);
+			if (dma_mapping_error(dev, addr.addr))
+				return -EFAULT;
+
+			pagemap_addr[i] = addr;
+		} else {
+			dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
+			if (dma_mapping_error(dev, dma_addr))
+				return -EFAULT;
+
+			pagemap_addr[i] =
+				drm_pagemap_addr_encode(dma_addr,
+							DRM_INTERCONNECT_SYSTEM,
+							order, dir);
+		}
 
 next:
 		i += NR_PAGES(order);
 	}
 
+	if (num_peer_pages)
+		drm_dbg(local_dpagemap->drm, "Migrating %lu peer pages over interconnect.\n",
+			num_peer_pages);
+
 	return 0;
 }
 
 /**
  * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
  * @dev: The device for which the pages were mapped
+ * @migrate_pfn: Array of migrate pfns set up for the mapped pages. Used to
+ *		 determine the drm_pagemap of a peer device private page.
  * @pagemap_addr: Array of DMA information corresponding to mapped pages
  * @npages: Number of pages to unmap
  * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
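For device-private source pages the map step above now goes through the owning pagemap's ->device_map() hook instead of dma_map_page(), and the unmap path below mirrors it with ->device_unmap(). Judging only from these call sites, a driver-side stub honouring that contract might be shaped roughly as follows; example_peer_address() is a made-up helper and the DRM_INTERCONNECT_DRIVER protocol value is an assumption:

    /* Hypothetical sketch of a peer-mapping hook; not part of this patch. */
    static struct drm_pagemap_addr
    example_device_map(struct drm_pagemap *dpagemap, struct device *dev,
                       struct page *page, unsigned int order,
                       enum dma_data_direction dir)
    {
            /* Resolve a bus address the mapping device can reach (driver specific). */
            dma_addr_t addr = example_peer_address(dpagemap, dev, page);

            /* The caller only checks the result with dma_mapping_error(dev, addr.addr). */
            return drm_pagemap_addr_encode(addr, DRM_INTERCONNECT_DRIVER, order, dir);
    }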
@@ -271,16 +295,27 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
  */
 static void drm_pagemap_migrate_unmap_pages(struct device *dev,
 					    struct drm_pagemap_addr *pagemap_addr,
+					    unsigned long *migrate_pfn,
 					    unsigned long npages,
 					    enum dma_data_direction dir)
 {
 	unsigned long i;
 
 	for (i = 0; i < npages;) {
-		if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
+		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+
+		if (!page || !pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
 			goto next;
 
-		dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
+		if (is_zone_device_page(page)) {
+			struct drm_pagemap_zdd *zdd = page->zone_device_data;
+			struct drm_pagemap *dpagemap = zdd->dpagemap;
+
+			dpagemap->ops->device_unmap(dpagemap, dev, pagemap_addr[i]);
+		} else {
+			dma_unmap_page(dev, pagemap_addr[i].addr,
+				       PAGE_SIZE << pagemap_addr[i].order, dir);
+		}
 
 next:
 		i += NR_PAGES(pagemap_addr[i].order);
@@ -297,13 +332,12 @@ npages_in_range(unsigned long start, unsigned long end)
  * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
  * @devmem_allocation: The device memory allocation to migrate to.
  * The caller should hold a reference to the device memory allocation,
- * and the reference is consumed by this function unless it returns with
+ * and the reference is consumed by this function even if it returns with
  * an error.
  * @mm: Pointer to the struct mm_struct.
  * @start: Start of the virtual address range to migrate.
  * @end: End of the virtual address range to migrate.
- * @timeslice_ms: The time requested for the migrated pagemap pages to
- *		  be present in @mm before being allowed to be migrated back.
+ * @mdetails: Details to govern the migration.
  *
  * This function migrates the specified virtual address range to device memory.
 * It performs the necessary setup and invokes the driver-specific operations for
@@ -321,7 +355,7 @@ npages_in_range(unsigned long start, unsigned long end)
 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 				  struct mm_struct *mm,
 				  unsigned long start, unsigned long end,
-				  unsigned long timeslice_ms)
+				  const struct drm_pagemap_migrate_details *mdetails)
 {
 	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
 	struct drm_pagemap *dpagemap = devmem_allocation->dpagemap;
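With this signature change the migration policy travels in a struct rather than a bare timeslice argument. A hypothetical call site, reusing only names that appear elsewhere in this patch (the surrounding driver variables are made up):

    struct drm_pagemap_migrate_details mdetails = {
            .timeslice_ms = 5,                 /* example value */
            .can_migrate_same_pagemap = false,
            .source_peer_migrates = false,
    };
    int err;

    err = drm_pagemap_migrate_to_devmem(devmem_allocation, mm, start, end, &mdetails);
    /*
     * Per the updated kerneldoc above, the devmem_allocation reference is
     * consumed even when an error is returned, so it must not be put again here.
     */
    if (err)
            return err;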
@@ -330,9 +364,11 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 		.start = start,
 		.end = end,
 		.pgmap_owner = pagemap->owner,
-		.flags = MIGRATE_VMA_SELECT_SYSTEM,
+		.flags = MIGRATE_VMA_SELECT_SYSTEM | MIGRATE_VMA_SELECT_DEVICE_COHERENT |
+			(mdetails->source_peer_migrates ? 0 : MIGRATE_VMA_SELECT_DEVICE_PRIVATE),
 	};
 	unsigned long i, npages = npages_in_range(start, end);
+	unsigned long own_pages = 0, migrated_pages = 0;
 	struct vm_area_struct *vas;
 	struct drm_pagemap_zdd *zdd = NULL;
 	struct page **pages;
@@ -374,8 +410,10 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 	zdd = drm_pagemap_zdd_alloc(dpagemap);
 	if (!zdd) {
 		err = -ENOMEM;
-		goto err_free;
+		kvfree(buf);
+		goto err_out;
 	}
+	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */
 
 	migrate.vma = vas;
 	migrate.src = buf;
@@ -386,35 +424,84 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 		goto err_free;
 
 	if (!migrate.cpages) {
-		err = -EFAULT;
+		/* No pages to migrate. Raced or unknown device pages. */
+		err = -EBUSY;
 		goto err_free;
 	}
 
 	if (migrate.cpages != npages) {
+		/*
+		 * Some pages to migrate. But we want to migrate all or
+		 * nothing. Raced or unknown device pages.
+		 */
 		err = -EBUSY;
-		goto err_finalize;
+		goto err_aborted_migration;
 	}
 
+	/* Count device-private pages to migrate */
+	for (i = 0; i < npages;) {
+		struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
+		unsigned long nr_pages = src_page ? NR_PAGES(folio_order(page_folio(src_page))) : 1;
+
+		if (src_page && is_zone_device_page(src_page)) {
+			if (page_pgmap(src_page) == pagemap)
+				own_pages += nr_pages;
+		}
+
+		i += nr_pages;
+	}
+
+	drm_dbg(dpagemap->drm, "Total pages %lu; Own pages: %lu.\n",
+		npages, own_pages);
+	if (own_pages == npages) {
+		err = 0;
+		drm_dbg(dpagemap->drm, "Migration wasn't necessary.\n");
+		goto err_aborted_migration;
+	} else if (own_pages && !mdetails->can_migrate_same_pagemap) {
+		err = -EBUSY;
+		drm_dbg(dpagemap->drm, "Migration aborted due to fragmentation.\n");
+		goto err_aborted_migration;
+	}
+
 	err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
 	if (err)
 		goto err_finalize;
 
-	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
-					    migrate.src, npages, DMA_TO_DEVICE);
+	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev,
+					    devmem_allocation->dpagemap, pagemap_addr,
+					    migrate.src, npages, DMA_TO_DEVICE,
+					    mdetails);
+
+	if (err) {
+		drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr,
+						migrate.src, npages, DMA_TO_DEVICE);
 
-	if (err)
 		goto err_finalize;
+	}
 
+	own_pages = 0;
 	for (i = 0; i < npages; ++i) {
 		struct page *page = pfn_to_page(migrate.dst[i]);
+		struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
 
+		if (unlikely(src_page && is_zone_device_page(src_page) &&
+			     page_pgmap(src_page) == pagemap &&
+			     !mdetails->can_migrate_same_pagemap)) {
+			migrate.dst[i] = 0;
+			pages[i] = NULL;
+			own_pages++;
+			continue;
+		}
 		pages[i] = page;
 		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
 		drm_pagemap_get_devmem_page(page, zdd);
 	}
+	drm_WARN_ON(dpagemap->drm, !!own_pages);
 
 	err = ops->copy_to_devmem(pages, pagemap_addr, npages,
 				  devmem_allocation->pre_migrate_fence);
+	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr,
+					migrate.src, npages, DMA_TO_DEVICE);
 	if (err)
 		goto err_finalize;
 
@@ -423,21 +510,37 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 
 	/* Upon success bind devmem allocation to range and zdd */
 	devmem_allocation->timeslice_expiration = get_jiffies_64() +
-		msecs_to_jiffies(timeslice_ms);
-	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */
+		msecs_to_jiffies(mdetails->timeslice_ms);
 
 err_finalize:
 	if (err)
 		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+err_aborted_migration:
 	migrate_vma_pages(&migrate);
+
+	for (i = 0; i < npages;) {
+		struct page *page = migrate_pfn_to_page(migrate.src[i]);
+		unsigned long nr_pages = page ? NR_PAGES(folio_order(page_folio(page))) : 1;
+
+		if (migrate.src[i] & MIGRATE_PFN_MIGRATE)
+			migrated_pages += nr_pages;
+
+		i += nr_pages;
+	}
+
+	if (!err && migrated_pages < npages - own_pages) {
+		drm_dbg(dpagemap->drm, "Raced while finalizing migration.\n");
+		err = -EBUSY;
+	}
+
 	migrate_vma_finalize(&migrate);
-	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
-					DMA_TO_DEVICE);
 err_free:
-	if (zdd)
-		drm_pagemap_zdd_put(zdd);
+	drm_pagemap_zdd_put(zdd);
 	kvfree(buf);
+	return err;
+
 err_out:
+	devmem_allocation->ops->devmem_release(devmem_allocation);
 	return err;
 }
 EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
@@ -710,6 +813,7 @@ EXPORT_SYMBOL(drm_pagemap_put);
 int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
 {
 	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+	struct drm_pagemap_migrate_details mdetails = {};
 	unsigned long npages, mpages = 0;
 	struct page **pages;
 	unsigned long *src, *dst;
@@ -748,8 +852,10 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
 	if (err || !mpages)
 		goto err_finalize;
 
-	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
-					    dst, npages, DMA_FROM_DEVICE);
+	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev,
+					    devmem_allocation->dpagemap, pagemap_addr,
+					    dst, npages, DMA_FROM_DEVICE,
+					    &mdetails);
 	if (err)
 		goto err_finalize;
 
@@ -765,8 +871,9 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
 		drm_pagemap_migration_unlock_put_pages(npages, dst);
 	migrate_device_pages(src, dst, npages);
 	migrate_device_finalize(src, dst, npages);
-	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
+	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, dst, npages,
 					DMA_FROM_DEVICE);
+
 err_free:
 	kvfree(buf);
 err_out:
@@ -809,6 +916,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
 			MIGRATE_VMA_SELECT_DEVICE_COHERENT,
 		.fault_page = page,
 	};
+	struct drm_pagemap_migrate_details mdetails = {};
 	struct drm_pagemap_zdd *zdd;
 	const struct drm_pagemap_devmem_ops *ops;
 	struct device *dev = NULL;
@@ -866,8 +974,8 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
 	if (err)
 		goto err_finalize;
 
-	err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
-					    DMA_FROM_DEVICE);
+	err = drm_pagemap_migrate_map_pages(dev, zdd->dpagemap, pagemap_addr, migrate.dst, npages,
+					    DMA_FROM_DEVICE, &mdetails);
 	if (err)
 		goto err_finalize;
 
@@ -884,8 +992,8 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
 	migrate_vma_pages(&migrate);
 	migrate_vma_finalize(&migrate);
 	if (dev)
-		drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
-						DMA_FROM_DEVICE);
+		drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, migrate.dst,
+						npages, DMA_FROM_DEVICE);
 err_free:
 	kvfree(buf);
 err_out: