drm/i915: Add intel_bo_panic_setup() and intel_bo_panic_finish()

Implement both functions for i915 and xe, they prepare the work for
drm_panic support.
They both map one page at a time — via kmap_local_page_try_from_panic() on
i915 and ttm_bo_kmap_try_from_panic() on xe — to write the panic screen to
the framebuffer.

Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
Link: https://lore.kernel.org/r/20250624091501.257661-8-jfalempe@redhat.com
Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
This commit is contained in:
Jocelyn Falempe
2025-06-24 11:01:16 +02:00
committed by Maarten Lankhorst
parent da091afacb
commit 75fb60e5ad
5 changed files with 188 additions and 0 deletions

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
#include <drm/drm_panic.h>
#include "display/intel_display_types.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
@@ -63,3 +64,13 @@ struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
{
return i915_gem_object_alloc_framebuffer();
}
/*
 * Prepare the framebuffer BO for drm_panic drawing.
 * Thin display-layer wrapper: delegates to the i915 GEM implementation,
 * which either reuses an existing kernel mapping or builds a page list.
 * Returns 0 on success or a negative errno (e.g. -ENOMEM, -EOPNOTSUPP).
 */
int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
{
	return i915_gem_object_panic_setup(sb);
}
/*
 * Release panic-drawing resources for @fb.
 * Thin display-layer wrapper around the i915 GEM implementation.
 *
 * Note: the original used "return i915_gem_object_panic_finish(fb);".
 * A return statement with a (void) expression is a C11 6.8.6.4p1
 * constraint violation in a void function; call it plainly instead.
 */
void intel_bo_panic_finish(struct intel_framebuffer *fb)
{
	i915_gem_object_panic_finish(fb);
}

View File

@@ -7,6 +7,7 @@
#include <linux/types.h>
struct drm_gem_object;
struct drm_scanout_buffer;
struct intel_framebuffer;
struct seq_file;
struct vm_area_struct;
@@ -25,5 +26,7 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
struct intel_framebuffer *intel_bo_alloc_framebuffer(void);
int intel_bo_panic_setup(struct drm_scanout_buffer *sb);
void intel_bo_panic_finish(struct intel_framebuffer *fb);
#endif /* __INTEL_BO__ */

View File

@@ -16,6 +16,7 @@
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
struct drm_scanout_buffer;
enum intel_region_id;
struct intel_framebuffer;
@@ -693,6 +694,9 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space

View File

@@ -4,8 +4,10 @@
*/
#include <drm/drm_cache.h>
#include <drm/drm_panic.h>
#include <linux/vmalloc.h>
#include "display/intel_fb.h"
#include "display/intel_display_types.h"
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
@@ -366,6 +368,67 @@ struct i915_framebuffer {
struct i915_panic_data panic;
};
/* Fetch the embedded panic state from a framebuffer known to be an
 * i915_framebuffer (the only kind this file allocates). */
static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
{
	return &container_of_const(fb, struct i915_framebuffer, base)->panic;
}
/*
 * Drop the currently mapped framebuffer page, if any.
 * Flush CPU caches for the page first so the written pixels reach memory,
 * then clear the cached mapping so the next pixel forces a fresh kmap.
 */
static void i915_panic_kunmap(struct i915_panic_data *panic)
{
	if (!panic->vaddr)
		return;

	drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
	kunmap_local(panic->vaddr);
	panic->vaddr = NULL;
}
static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
struct page *page;
struct page **pages;
struct sgt_iter iter;
/* For a 3840x2160 32 bits Framebuffer, this should require ~64K */
pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
if (!pages)
return NULL;
i = 0;
for_each_sgt_page(page, iter, obj->mm.pages)
pages[i++] = page;
return pages;
}
/*
* The scanout buffer pages are not mapped, so for each pixel,
* use kmap_local_page_try_from_panic() to map the page, and write the pixel.
* Try to keep the map from the previous pixel, to avoid too much map/unmap.
*/
/*
 * The scanout buffer pages are not mapped, so for each pixel,
 * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
 * Try to keep the map from the previous pixel, to avoid too much map/unmap.
 */
static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
						 unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct i915_panic_data *panic = to_i915_panic_data(fb);
	unsigned int byte_off = y * sb->pitch[0] + x * sb->format->cpp[0];
	unsigned int page_nr = byte_off >> PAGE_SHIFT;
	unsigned int page_off = byte_off % PAGE_SIZE;
	u32 *pix;

	/* Remap only when the pixel lands on a different page. */
	if (page_nr != panic->page) {
		i915_panic_kunmap(panic);
		panic->page = page_nr;
		panic->vaddr =
			kmap_local_page_try_from_panic(panic->pages[panic->page]);
	}

	/* The panic-safe kmap may fail; silently skip the pixel then. */
	if (!panic->vaddr)
		return;

	pix = panic->vaddr + page_off;
	*pix = color;
}
struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
{
struct i915_framebuffer *i915_fb;
@@ -376,6 +439,49 @@ struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
return NULL;
}
/*
* Setup the gem framebuffer for drm_panic access.
* Use current vaddr if it exists, or setup a list of pages.
* pfn is not supported yet.
*/
/*
 * Setup the gem framebuffer for drm_panic access.
 * Use current vaddr if it exists, or setup a list of pages.
 * pfn is not supported yet.
 */
int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct i915_panic_data *panic = to_i915_panic_data(fb);
	struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
	enum i915_map_type has_type;
	void *vaddr;

	/* Fast path: the object is already mapped in the kernel. */
	vaddr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (vaddr) {
		if (i915_gem_object_has_iomem(obj))
			iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)vaddr);
		else
			iosys_map_set_vaddr(&sb->map[0], vaddr);
		return 0;
	}

	/* Only struct-page-backed objects can be kmapped page by page. */
	if (!i915_gem_object_has_struct_page(obj))
		return -EOPNOTSUPP;

	panic->pages = i915_gem_object_panic_pages(obj);
	if (!panic->pages)
		return -ENOMEM;
	panic->page = -1;
	sb->set_pixel = i915_gem_object_panic_page_set_pixel;
	return 0;
}
/*
 * Tear down the panic-drawing state set up by i915_gem_object_panic_setup():
 * unmap any page still mapped and free the page array.
 */
void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
{
	struct i915_panic_data *pdata = to_i915_panic_data(fb);

	i915_panic_kunmap(pdata);
	pdata->page = -1;
	kfree(pdata->pages);
	pdata->pages = NULL;
}
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)

View File

@@ -1,8 +1,11 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
#include <drm/drm_panic.h>
#include "intel_fb.h"
#include "intel_display_types.h"
#include "xe_bo.h"
@@ -73,6 +76,50 @@ struct xe_framebuffer {
struct xe_panic_data panic;
};
/* Fetch the embedded panic state from a framebuffer known to be an
 * xe_framebuffer (the only kind this file allocates). */
static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
{
	return &container_of_const(fb, struct xe_framebuffer, base)->panic;
}
/*
 * Drop the currently mapped framebuffer page, if any.
 * Flush CPU caches for the page first so the written pixels reach memory,
 * then clear the cached mapping so the next pixel forces a fresh kmap.
 */
static void xe_panic_kunmap(struct xe_panic_data *panic)
{
	if (!panic->vaddr)
		return;

	drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
	kunmap_local(panic->vaddr);
	panic->vaddr = NULL;
}
/*
* The scanout buffer pages are not mapped, so for each pixel,
* use kmap_local_page_try_from_panic() to map the page, and write the pixel.
* Try to keep the map from the previous pixel, to avoid too much map/unmap.
*/
/*
 * The scanout buffer pages are not mapped, so for each pixel,
 * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
 * Try to keep the map from the previous pixel, to avoid too much map/unmap.
 */
static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
				    unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *panic = to_xe_panic_data(fb);
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
	unsigned int byte_off = y * sb->pitch[0] + x * sb->format->cpp[0];
	unsigned int page_nr = byte_off >> PAGE_SHIFT;
	unsigned int page_off = byte_off % PAGE_SIZE;
	u32 *pix;

	/* Remap only when the pixel lands on a different page. */
	if (page_nr != panic->page) {
		xe_panic_kunmap(panic);
		panic->page = page_nr;
		panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
							  panic->page);
	}

	/* The panic-safe kmap may fail; silently skip the pixel then. */
	if (!panic->vaddr)
		return;

	pix = panic->vaddr + page_off;
	*pix = color;
}
struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
{
struct xe_framebuffer *xe_fb;
@@ -83,3 +130,20 @@ struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
return NULL;
}
/*
 * Prepare the framebuffer for drm_panic drawing on xe.
 * No page list is needed: pages are kmapped on demand per pixel via the
 * set_pixel callback. Always succeeds.
 */
int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *pdata = to_xe_panic_data(fb);

	pdata->page = -1;
	sb->set_pixel = xe_panic_page_set_pixel;
	return 0;
}
/*
 * Tear down panic-drawing state for @fb: unmap any page still mapped
 * and reset the cached page index.
 */
void intel_bo_panic_finish(struct intel_framebuffer *fb)
{
	struct xe_panic_data *pdata = to_xe_panic_data(fb);

	xe_panic_kunmap(pdata);
	pdata->page = -1;
}