mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-04 12:25:07 -04:00
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
"Just fixes and cleanups this time around. The mapping cleanups are
preparing the ground for new features, though"
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
virtio-vdpa: Drop redundant conversion to bool
vduse: Use fixed 4KB bounce pages for non-4KB page size
vduse: switch to use virtio map API instead of DMA API
vdpa: introduce map ops
vdpa: support virtio_map
virtio: introduce map ops in virtio core
virtio_ring: rename dma_handle to map_handle
virtio: introduce virtio_map container union
virtio: rename dma helpers
virtio_ring: switch to use dma_{map|unmap}_page()
virtio_ring: constify virtqueue pointer for DMA helpers
virtio_balloon: Remove redundant __GFP_NOWARN
vhost: vringh: Fix copy_to_iter return value check
vhost: vringh: Modify the return value check
This commit is contained in:
@@ -5,6 +5,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/virtio.h>
|
||||
#include <linux/vhost_iotlb.h>
|
||||
#include <linux/virtio_net.h>
|
||||
#include <linux/virtio_blk.h>
|
||||
@@ -70,11 +71,12 @@ struct vdpa_mgmt_dev;
|
||||
/**
|
||||
* struct vdpa_device - representation of a vDPA device
|
||||
* @dev: underlying device
|
||||
* @dma_dev: the actual device that is performing DMA
|
||||
* @vmap: the metadata passed to upper layer to be used for mapping
|
||||
* @driver_override: driver name to force a match; do not set directly,
|
||||
* because core frees it; use driver_set_override() to
|
||||
* set or clear it.
|
||||
* @config: the configuration ops for this device.
|
||||
* @map: the map ops for this device
|
||||
* @cf_lock: Protects get and set access to configuration layout.
|
||||
* @index: device index
|
||||
* @features_valid: were features initialized? for legacy guests
|
||||
@@ -87,9 +89,10 @@ struct vdpa_mgmt_dev;
|
||||
*/
|
||||
struct vdpa_device {
|
||||
struct device dev;
|
||||
struct device *dma_dev;
|
||||
union virtio_map vmap;
|
||||
const char *driver_override;
|
||||
const struct vdpa_config_ops *config;
|
||||
const struct virtio_map_ops *map;
|
||||
struct rw_semaphore cf_lock; /* Protects get/set config */
|
||||
unsigned int index;
|
||||
bool features_valid;
|
||||
@@ -352,11 +355,11 @@ struct vdpa_map_file {
|
||||
* @vdev: vdpa device
|
||||
* @asid: address space identifier
|
||||
* Returns integer: success (0) or error (< 0)
|
||||
* @get_vq_dma_dev: Get the dma device for a specific
|
||||
* @get_vq_map: Get the map metadata for a specific
|
||||
* virtqueue (optional)
|
||||
* @vdev: vdpa device
|
||||
* @idx: virtqueue index
|
||||
* Returns pointer to structure device or error (NULL)
|
||||
* Returns the map token (union virtio_map) or error (NULL)
|
||||
* @bind_mm: Bind the device to a specific address space
|
||||
* so the vDPA framework can use VA when this
|
||||
* callback is implemented. (optional)
|
||||
@@ -436,7 +439,7 @@ struct vdpa_config_ops {
|
||||
int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
|
||||
int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
|
||||
unsigned int asid);
|
||||
struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
|
||||
union virtio_map (*get_vq_map)(struct vdpa_device *vdev, u16 idx);
|
||||
int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
|
||||
void (*unbind_mm)(struct vdpa_device *vdev);
|
||||
|
||||
@@ -446,6 +449,7 @@ struct vdpa_config_ops {
|
||||
|
||||
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
||||
const struct vdpa_config_ops *config,
|
||||
const struct virtio_map_ops *map,
|
||||
unsigned int ngroups, unsigned int nas,
|
||||
size_t size, const char *name,
|
||||
bool use_va);
|
||||
@@ -457,6 +461,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
||||
* @member: the name of struct vdpa_device within the @dev_struct
|
||||
* @parent: the parent device
|
||||
* @config: the bus operations that is supported by this device
|
||||
* @map: the map operations that is supported by this device
|
||||
* @ngroups: the number of virtqueue groups supported by this device
|
||||
* @nas: the number of address spaces
|
||||
* @name: name of the vdpa device
|
||||
@@ -464,10 +469,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
||||
*
|
||||
* Return allocated data structure or ERR_PTR upon error
|
||||
*/
|
||||
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
|
||||
name, use_va) \
|
||||
#define vdpa_alloc_device(dev_struct, member, parent, config, map, \
|
||||
ngroups, nas, name, use_va) \
|
||||
container_of((__vdpa_alloc_device( \
|
||||
parent, config, ngroups, nas, \
|
||||
parent, config, map, ngroups, nas, \
|
||||
(sizeof(dev_struct) + \
|
||||
BUILD_BUG_ON_ZERO(offsetof( \
|
||||
dev_struct, member))), name, use_va)), \
|
||||
@@ -520,9 +525,9 @@ static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
|
||||
dev_set_drvdata(&vdev->dev, data);
|
||||
}
|
||||
|
||||
static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
|
||||
static inline union virtio_map vdpa_get_map(struct vdpa_device *vdev)
|
||||
{
|
||||
return vdev->dma_dev;
|
||||
return vdev->vmap;
|
||||
}
|
||||
|
||||
static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
|
||||
|
||||
@@ -41,6 +41,15 @@ struct virtqueue {
|
||||
void *priv;
|
||||
};
|
||||
|
||||
struct vduse_iova_domain;
|
||||
|
||||
union virtio_map {
|
||||
/* Device that performs DMA */
|
||||
struct device *dma_dev;
|
||||
/* VDUSE specific mapping data */
|
||||
struct vduse_iova_domain *iova_domain;
|
||||
};
|
||||
|
||||
int virtqueue_add_outbuf(struct virtqueue *vq,
|
||||
struct scatterlist sg[], unsigned int num,
|
||||
void *data,
|
||||
@@ -161,9 +170,11 @@ struct virtio_device {
|
||||
struct virtio_device_id id;
|
||||
const struct virtio_config_ops *config;
|
||||
const struct vringh_config_ops *vringh_config;
|
||||
const struct virtio_map_ops *map;
|
||||
struct list_head vqs;
|
||||
VIRTIO_DECLARE_FEATURES(features);
|
||||
void *priv;
|
||||
union virtio_map vmap;
|
||||
#ifdef CONFIG_VIRTIO_DEBUG
|
||||
struct dentry *debugfs_dir;
|
||||
u64 debugfs_filter_features[VIRTIO_FEATURES_DWORDS];
|
||||
@@ -262,18 +273,41 @@ void unregister_virtio_driver(struct virtio_driver *drv);
|
||||
module_driver(__virtio_driver, register_virtio_driver, \
|
||||
unregister_virtio_driver)
|
||||
|
||||
dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
|
||||
|
||||
void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
|
||||
union virtio_map mapping_token,
|
||||
size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t gfp);
|
||||
|
||||
void virtqueue_map_free_coherent(struct virtio_device *vdev,
|
||||
union virtio_map mapping_token,
|
||||
size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle);
|
||||
|
||||
dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
|
||||
struct page *page,
|
||||
unsigned long offset,
|
||||
size_t size,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs);
|
||||
|
||||
void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
|
||||
dma_addr_t dma_handle,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs);
|
||||
|
||||
dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
|
||||
void virtqueue_unmap_single_attrs(const struct virtqueue *_vq, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs);
|
||||
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
|
||||
int virtqueue_map_mapping_error(const struct virtqueue *_vq, dma_addr_t addr);
|
||||
|
||||
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
|
||||
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
|
||||
bool virtqueue_map_need_sync(const struct virtqueue *_vq, dma_addr_t addr);
|
||||
void virtqueue_map_sync_single_range_for_cpu(const struct virtqueue *_vq, dma_addr_t addr,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir);
|
||||
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
|
||||
void virtqueue_map_sync_single_range_for_device(const struct virtqueue *_vq, dma_addr_t addr,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir);
|
||||
|
||||
|
||||
@@ -139,6 +139,78 @@ struct virtio_config_ops {
|
||||
int (*enable_vq_after_reset)(struct virtqueue *vq);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct virtio_map_ops - operations for mapping buffer for a virtio device
|
||||
* Note: For a transport that has its own mapping logic it must
|
||||
* implement all of the operations
|
||||
* @map_page: map a buffer to the device
|
||||
* map: metadata for performing mapping
|
||||
* page: the page that will be mapped by the device
|
||||
* offset: the offset in the page for a buffer
|
||||
* size: the buffer size
|
||||
* dir: mapping direction
|
||||
* attrs: mapping attributes
|
||||
* Returns: the mapped address
|
||||
* @unmap_page: unmap a buffer from the device
|
||||
* map: device specific mapping map
|
||||
* map_handle: the mapped address
|
||||
* size: the buffer size
|
||||
* dir: mapping direction
|
||||
* attrs: unmapping attributes
|
||||
* @sync_single_for_cpu: sync a single buffer from device to cpu
|
||||
* map: metadata for performing mapping
|
||||
* map_handle: the mapping address to sync
|
||||
* size: the size of the buffer
|
||||
* dir: synchronization direction
|
||||
* @sync_single_for_device: sync a single buffer from cpu to device
|
||||
* map: metadata for performing mapping
|
||||
* map_handle: the mapping address to sync
|
||||
* size: the size of the buffer
|
||||
* dir: synchronization direction
|
||||
* @alloc: alloc a coherent buffer mapping
|
||||
* map: metadata for performing mapping
|
||||
* size: the size of the buffer
|
||||
* map_handle: the mapped address of the allocated buffer
|
||||
* gfp: allocation flag (GFP_XXX)
|
||||
* Returns: virtual address of the allocated buffer
|
||||
* @free: free a coherent buffer mapping
|
||||
* map: metadata for performing mapping
|
||||
* size: the size of the buffer
|
||||
* vaddr: virtual address of the buffer
|
||||
* map_handle: the mapped address of the buffer to free
|
||||
* attrs: unmapping attributes
|
||||
* @need_sync: if the buffer needs synchronization
|
||||
* map: metadata for performing mapping
|
||||
* map_handle: the mapped address
|
||||
* Returns: whether the buffer needs synchronization
|
||||
* @mapping_error: check whether the mapping address is an error
|
||||
* map: metadata for performing mapping
|
||||
* map_handle: the mapped address
|
||||
* @max_mapping_size: get the maximum buffer size that can be mapped
|
||||
* map: metadata for performing mapping
|
||||
* Returns: the maximum buffer size that can be mapped
|
||||
*/
|
||||
struct virtio_map_ops {
|
||||
dma_addr_t (*map_page)(union virtio_map map, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
void (*unmap_page)(union virtio_map map, dma_addr_t map_handle,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs);
|
||||
void (*sync_single_for_cpu)(union virtio_map map, dma_addr_t map_handle,
|
||||
size_t size, enum dma_data_direction dir);
|
||||
void (*sync_single_for_device)(union virtio_map map,
|
||||
dma_addr_t map_handle, size_t size,
|
||||
enum dma_data_direction dir);
|
||||
void *(*alloc)(union virtio_map map, size_t size,
|
||||
dma_addr_t *map_handle, gfp_t gfp);
|
||||
void (*free)(union virtio_map map, size_t size, void *vaddr,
|
||||
dma_addr_t map_handle, unsigned long attrs);
|
||||
bool (*need_sync)(union virtio_map map, dma_addr_t map_handle);
|
||||
int (*mapping_error)(union virtio_map map, dma_addr_t map_handle);
|
||||
size_t (*max_mapping_size)(union virtio_map map);
|
||||
};
|
||||
|
||||
/* If driver didn't advertise the feature, it will never appear. */
|
||||
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
|
||||
unsigned int fbit);
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
#define _LINUX_VIRTIO_RING_H
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include <linux/virtio.h>
|
||||
#include <linux/irqreturn.h>
|
||||
#include <uapi/linux/virtio_ring.h>
|
||||
|
||||
@@ -79,9 +80,9 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
|
||||
|
||||
/*
|
||||
* Creates a virtqueue and allocates the descriptor ring with per
|
||||
* virtqueue DMA device.
|
||||
* virtqueue mapping operations.
|
||||
*/
|
||||
struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
|
||||
struct virtqueue *vring_create_virtqueue_map(unsigned int index,
|
||||
unsigned int num,
|
||||
unsigned int vring_align,
|
||||
struct virtio_device *vdev,
|
||||
@@ -91,7 +92,7 @@ struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
|
||||
bool (*notify)(struct virtqueue *vq),
|
||||
void (*callback)(struct virtqueue *vq),
|
||||
const char *name,
|
||||
struct device *dma_dev);
|
||||
union virtio_map map);
|
||||
|
||||
/*
|
||||
* Creates a virtqueue with a standard layout but a caller-allocated
|
||||
|
||||
Reference in New Issue
Block a user