Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

 - A new virtio RTC driver

 - vhost scsi now logs write descriptors so migration works

 - Some hardening work in virtio core

 - An old spec compliance issue fixed in vhost net

 - A couple of cleanups and fixes in vringh, virtio-pci, and vdpa

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio: reject shm region if length is zero
  virtio_rtc: Add RTC class driver
  virtio_rtc: Add Arm Generic Timer cross-timestamping
  virtio_rtc: Add PTP clocks
  virtio_rtc: Add module and driver core
  vringh: use bvec_kmap_local
  vhost: vringh: Use matching allocation type in resize_iovec()
  virtio-pci: Fix result size returned for the admin command completion
  vdpa/octeon_ep: Control PCI dev enabling manually
  vhost-scsi: log event queue write descriptors
  vhost-scsi: log control queue write descriptors
  vhost-scsi: log I/O queue write descriptors
  vhost-scsi: adjust vhost_scsi_get_desc() to log vring descriptors
  vhost: modify vhost_log_write() for broader users
Merged by: Linus Torvalds
Date: 2025-05-29 08:15:35 -07:00
15 changed files with 2707 additions and 36 deletions


@@ -26073,6 +26073,13 @@ S: Maintained
F: drivers/nvdimm/nd_virtio.c
F: drivers/nvdimm/virtio_pmem.c
VIRTIO RTC DRIVER
M: Peter Hilber <quic_philber@quicinc.com>
L: virtualization@lists.linux.dev
S: Maintained
F: drivers/virtio/virtio_rtc_*
F: include/uapi/linux/virtio_rtc.h
VIRTIO SOUND DRIVER
M: Anton Yakovlev <anton.yakovlev@opensynergy.com>
M: "Michael S. Tsirkin" <mst@redhat.com>


@@ -454,6 +454,9 @@ static void octep_vdpa_remove_pf(struct pci_dev *pdev)
octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
octep_vdpa_pf_bar_expand(octpf);
/* The pf version does not use managed PCI. */
pci_disable_device(pdev);
}
static void octep_vdpa_vf_bar_shrink(struct pci_dev *pdev)
@@ -825,7 +828,7 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
struct octep_pf *octpf;
int ret;
ret = pcim_enable_device(pdev);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(dev, "Failed to enable device\n");
return ret;
@@ -834,15 +837,17 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "No usable DMA configuration\n");
return ret;
goto disable_pci;
}
octpf = devm_kzalloc(dev, sizeof(*octpf), GFP_KERNEL);
if (!octpf)
return -ENOMEM;
if (!octpf) {
ret = -ENOMEM;
goto disable_pci;
}
ret = octep_iomap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
if (ret)
return ret;
goto disable_pci;
pci_set_master(pdev);
pci_set_drvdata(pdev, octpf);
@@ -856,6 +861,8 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
unmap_region:
octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
disable_pci:
pci_disable_device(pdev);
return ret;
}


@@ -133,6 +133,11 @@ struct vhost_scsi_cmd {
struct se_cmd tvc_se_cmd;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
/*
* Dirty write descriptors of this command.
*/
struct vhost_log *tvc_log;
unsigned int tvc_log_num;
/* Completed commands list, serviced from vhost worker thread */
struct llist_node tvc_completion_list;
/* Used to track inflight cmd */
@@ -258,6 +263,12 @@ struct vhost_scsi_tmf {
struct iovec resp_iov;
int in_iovs;
int vq_desc;
/*
* Dirty write descriptors of this command.
*/
struct vhost_log *tmf_log;
unsigned int tmf_log_num;
};
/*
@@ -362,6 +373,45 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
return tpg->tv_fabric_prot_type;
}
static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq,
struct vhost_scsi_cmd *cmd,
struct vhost_log *log,
unsigned int log_num)
{
if (!cmd->tvc_log)
cmd->tvc_log = kmalloc_array(vq->dev->iov_limit,
sizeof(*cmd->tvc_log),
GFP_KERNEL);
if (unlikely(!cmd->tvc_log)) {
vq_err(vq, "Failed to alloc tvc_log\n");
return -ENOMEM;
}
memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num);
cmd->tvc_log_num = log_num;
return 0;
}
static void vhost_scsi_log_write(struct vhost_virtqueue *vq,
struct vhost_log *log,
unsigned int log_num)
{
if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
return;
if (likely(!log_num || !log))
return;
/*
* vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM.
* No requirement for vq->iotlb case.
*/
WARN_ON_ONCE(unlikely(vq->iotlb));
vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
}
static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
@@ -408,6 +458,10 @@ static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
struct vhost_scsi_inflight *inflight = tmf->inflight;
/*
* tmf->tmf_log is default NULL unless VHOST_F_LOG_ALL is set.
*/
kfree(tmf->tmf_log);
kfree(tmf);
vhost_scsi_put_inflight(inflight);
}
@@ -517,6 +571,8 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct virtio_scsi_event *event = &evt->event;
struct virtio_scsi_event __user *eventp;
struct vhost_log *vq_log;
unsigned int log_num;
unsigned out, in;
int head, ret;
@@ -527,9 +583,19 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
again:
vhost_disable_notify(&vs->dev, vq);
vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
/*
* Reset 'log_num' since vhost_get_vq_desc() may reset it only
* after certain condition checks.
*/
log_num = 0;
head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
NULL, NULL);
vq_log, &log_num);
if (head < 0) {
vs->vs_events_missed = true;
return;
@@ -559,6 +625,8 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
vhost_add_used_and_signal(&vs->dev, vq, head, 0);
else
vq_err(vq, "Faulted on vhost_scsi_send_event\n");
vhost_scsi_log_write(vq, vq_log, log_num);
}
static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
@@ -660,6 +728,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log,
cmd->tvc_log_num);
vhost_scsi_release_cmd_res(se_cmd);
}
@@ -676,6 +747,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *cmd;
struct scatterlist *sgl, *prot_sgl;
struct vhost_log *log;
int tag;
tag = sbitmap_get(&svq->scsi_tags);
@@ -687,9 +759,11 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
cmd = &svq->scsi_cmds[tag];
sgl = cmd->sgl;
prot_sgl = cmd->prot_sgl;
log = cmd->tvc_log;
memset(cmd, 0, sizeof(*cmd));
cmd->sgl = sgl;
cmd->prot_sgl = prot_sgl;
cmd->tvc_log = log;
cmd->tvc_se_cmd.map_tag = tag;
cmd->inflight = vhost_scsi_get_inflight(vq);
@@ -1063,13 +1137,17 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
struct vhost_scsi_ctx *vc,
struct vhost_log *log, unsigned int *log_num)
{
int ret = -ENXIO;
if (likely(log_num))
*log_num = 0;
vc->head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
NULL, NULL);
log, log_num);
pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
vc->head, vc->out, vc->in);
@@ -1221,6 +1299,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
u8 *cdb;
struct vhost_log *vq_log;
unsigned int log_num;
mutex_lock(&vq->mutex);
/*
@@ -1236,8 +1316,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
if (ret)
goto err;
@@ -1386,6 +1469,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
goto err;
}
if (unlikely(vq_log && log_num)) {
ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num);
if (unlikely(ret)) {
vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
goto err;
}
}
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
cdb[0], lun);
pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
@@ -1421,11 +1512,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
if (ret == -ENXIO)
break;
else if (ret == -EIO)
else if (ret == -EIO) {
vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
else if (ret == -ENOMEM)
vhost_scsi_log_write(vq, vq_log, log_num);
} else if (ret == -ENOMEM) {
vhost_scsi_send_status(vs, vq, &vc,
SAM_STAT_TASK_SET_FULL);
vhost_scsi_log_write(vq, vq_log, log_num);
}
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
@@ -1467,6 +1561,8 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
mutex_lock(&tmf->svq->vq.mutex);
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log,
tmf->tmf_log_num);
mutex_unlock(&tmf->svq->vq.mutex);
vhost_scsi_release_tmf_res(tmf);
@@ -1490,7 +1586,8 @@ static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
struct vhost_virtqueue *vq,
struct virtio_scsi_ctrl_tmf_req *vtmf,
struct vhost_scsi_ctx *vc)
struct vhost_scsi_ctx *vc,
struct vhost_log *log, unsigned int log_num)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
@@ -1518,6 +1615,19 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
tmf->in_iovs = vc->in;
tmf->inflight = vhost_scsi_get_inflight(vq);
if (unlikely(log && log_num)) {
tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log),
GFP_KERNEL);
if (tmf->tmf_log) {
memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num);
tmf->tmf_log_num = log_num;
} else {
pr_err("vhost_scsi tmf log allocation error\n");
vhost_scsi_release_tmf_res(tmf);
goto send_reject;
}
}
if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
vhost_buf_to_lun(vtmf->lun), NULL,
TMR_LUN_RESET, GFP_KERNEL, 0,
@@ -1531,6 +1641,7 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
send_reject:
vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
VIRTIO_SCSI_S_FUNCTION_REJECTED);
vhost_scsi_log_write(vq, log, log_num);
}
static void
@@ -1567,6 +1678,8 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct vhost_scsi_ctx vc;
size_t typ_size;
int ret, c = 0;
struct vhost_log *vq_log;
unsigned int log_num;
mutex_lock(&vq->mutex);
/*
@@ -1580,8 +1693,11 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
if (ret)
goto err;
@@ -1645,9 +1761,12 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
goto err;
if (v_req.type == VIRTIO_SCSI_T_TMF)
vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
else
vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
vq_log, log_num);
else {
vhost_scsi_send_an_resp(vs, vq, &vc);
vhost_scsi_log_write(vq, vq_log, log_num);
}
err:
/*
* ENXIO: No more requests, or read error, wait for next kick
@@ -1657,11 +1776,13 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
if (ret == -ENXIO)
break;
else if (ret == -EIO)
else if (ret == -EIO) {
vhost_scsi_send_bad_target(vs, vq, &vc,
v_req.type == VIRTIO_SCSI_T_TMF ?
TYPE_CTRL_TMF :
TYPE_CTRL_AN);
vhost_scsi_log_write(vq, vq_log, log_num);
}
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
@@ -1756,6 +1877,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
wait_for_completion(&vs->old_inflight[i]->comp);
}
static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;
if (!svq->scsi_cmds)
return;
for (i = 0; i < svq->max_cmds; i++) {
tv_cmd = &svq->scsi_cmds[i];
kfree(tv_cmd->tvc_log);
tv_cmd->tvc_log = NULL;
tv_cmd->tvc_log_num = 0;
}
}
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
@@ -1775,6 +1914,7 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
sbitmap_free(&svq->scsi_tags);
kfree(svq->upages);
vhost_scsi_destroy_vq_log(vq);
kfree(svq->scsi_cmds);
svq->scsi_cmds = NULL;
}
@@ -2084,6 +2224,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
struct vhost_virtqueue *vq;
bool is_log, was_log;
int i;
if (features & ~VHOST_SCSI_FEATURES)
@@ -2096,12 +2237,39 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
if (!vs->dev.nvqs)
goto out;
is_log = features & (1 << VHOST_F_LOG_ALL);
/*
* All VQs should have same feature.
*/
was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
mutex_unlock(&vq->mutex);
}
/*
* If VHOST_F_LOG_ALL is removed, free tvc_log after
* vq->acked_features is committed.
*/
if (!is_log && was_log) {
for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
if (!vs->vqs[i].scsi_cmds)
continue;
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_scsi_destroy_vq_log(vq);
mutex_unlock(&vq->mutex);
}
}
out:
mutex_unlock(&vs->dev.mutex);
return 0;
}


@@ -2304,6 +2304,19 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
return 0;
}
/*
* vhost_log_write() - Log in dirty page bitmap
* @vq: vhost virtqueue.
* @log: Array of dirty memory in GPA.
* @log_num: Size of the vhost_log array.
* @len: The total length of memory buffer to log in the dirty bitmap.
* Some drivers may only partially use pages shared via the last
* vring descriptor (i.e. vhost-net RX buffer).
* Use (len == U64_MAX) to indicate that the driver logs all
* pages of the vring descriptors.
* @iov: Array of dirty memory in HVA.
* @count: Size of iovec array.
*/
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len, struct iovec *iov, int count)
{
@@ -2327,15 +2340,14 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
r = log_write(vq->log_base, log[i].addr, l);
if (r < 0)
return r;
len -= l;
if (!len) {
if (vq->log_ctx)
eventfd_signal(vq->log_ctx);
return 0;
}
if (len != U64_MAX)
len -= l;
}
/* Length written exceeds what we have stored. This is a bug. */
BUG();
if (vq->log_ctx)
eventfd_signal(vq->log_ctx);
return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
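
For reference, a minimal caller sketch (illustrative only, not part of the patch) that mirrors the new vhost_scsi_log_write() helper earlier in this series and uses the len == U64_MAX mode documented above to dirty-log every page of the descriptors handed out by vhost_get_vq_desc():

/*
 * Minimal caller sketch: dirty-log all pages covered by the write
 * descriptors returned from vhost_get_vq_desc(), the way the new
 * vhost-scsi helper does. Assumes a backend without an IOTLB.
 */
static void example_log_all_desc_pages(struct vhost_virtqueue *vq,
					struct vhost_log *log,
					unsigned int log_num)
{
	if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
		return;
	if (!log || !log_num)
		return;

	/* len == U64_MAX: log every page of every entry in @log */
	vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
}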


@@ -225,10 +225,9 @@ static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
if (flag)
new = krealloc_array(iov->iov, new_num,
sizeof(struct iovec), gfp);
new = krealloc_array(iov->iov, new_num, sizeof(*new), gfp);
else {
new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
new = kmalloc_array(new_num, sizeof(*new), gfp);
if (new) {
memcpy(new, iov->iov,
iov->max_num * sizeof(struct iovec));
@@ -1291,11 +1290,10 @@ static inline int getu16_iotlb(const struct vringh *vrh,
if (ret)
return ret;
} else {
void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
void *from = kaddr + ivec.iov.bvec[0].bv_offset;
__virtio16 *from = bvec_kmap_local(&ivec.iov.bvec[0]);
tmp = READ_ONCE(*(__virtio16 *)from);
kunmap_local(kaddr);
tmp = READ_ONCE(*from);
kunmap_local(from);
}
*val = vringh16_to_cpu(vrh, tmp);
@@ -1330,11 +1328,10 @@ static inline int putu16_iotlb(const struct vringh *vrh,
if (ret)
return ret;
} else {
void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
void *to = kaddr + ivec.iov.bvec[0].bv_offset;
__virtio16 *to = bvec_kmap_local(&ivec.iov.bvec[0]);
WRITE_ONCE(*(__virtio16 *)to, tmp);
kunmap_local(kaddr);
WRITE_ONCE(*to, tmp);
kunmap_local(to);
}
return 0;


@@ -188,4 +188,68 @@ config VIRTIO_DEBUG
If unsure, say N.
config VIRTIO_RTC
tristate "Virtio RTC driver"
depends on VIRTIO
depends on PTP_1588_CLOCK_OPTIONAL
help
This driver provides the current time from a Virtio RTC device. The driver
provides the time through one or more clocks. The Virtio RTC PTP
clocks and/or the Real Time Clock driver for Virtio RTC must be
enabled to expose the clocks to userspace.
To compile this code as a module, choose M here: the module will be
called virtio_rtc.
If unsure, say M.
if VIRTIO_RTC
comment "WARNING: Consider enabling VIRTIO_RTC_PTP and/or VIRTIO_RTC_CLASS."
depends on !VIRTIO_RTC_PTP && !VIRTIO_RTC_CLASS
comment "Enable PTP_1588_CLOCK in order to enable VIRTIO_RTC_PTP."
depends on PTP_1588_CLOCK=n
config VIRTIO_RTC_PTP
bool "Virtio RTC PTP clocks"
default y
depends on PTP_1588_CLOCK
help
This exposes any Virtio RTC clocks as PTP Hardware Clocks (PHCs) to
userspace. The PHC sysfs attribute "clock_name" describes the clock
type.
If unsure, say Y.
config VIRTIO_RTC_ARM
bool "Virtio RTC cross-timestamping using Arm Generic Timer"
default y
depends on VIRTIO_RTC_PTP && ARM_ARCH_TIMER
help
This enables Virtio RTC cross-timestamping using the Arm Generic Timer.
It only has an effect if the Virtio RTC device also supports this. The
cross-timestamp is available through the PTP clock driver precise
cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE2 aka
PTP_SYS_OFFSET_PRECISE).
If unsure, say Y.
comment "Enable RTC_CLASS in order to enable VIRTIO_RTC_CLASS."
depends on RTC_CLASS=n
config VIRTIO_RTC_CLASS
bool "Real Time Clock driver for Virtio RTC"
default y
depends on RTC_CLASS
help
This exposes the Virtio RTC UTC-like clock as a Linux Real Time Clock.
It only has an effect if the Virtio RTC device has a UTC-like clock
which smears leap seconds to avoid steps. The Real Time Clock is
read-only, and may support setting an alarm.
If unsure, say Y.
endif # VIRTIO_RTC
endif # VIRTIO_MENU
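
For illustration only (not part of the patch), a kernel .config fragment that builds the new driver as a module with both the PTP and RTC class front ends enabled, matching the dependencies spelled out above, could look like:

# Illustrative .config fragment; the ARM option assumes an arm/arm64 build
# where ARM_ARCH_TIMER is available.
CONFIG_VIRTIO=y
CONFIG_PTP_1588_CLOCK=y
CONFIG_RTC_CLASS=y
CONFIG_VIRTIO_RTC=m
CONFIG_VIRTIO_RTC_PTP=y
CONFIG_VIRTIO_RTC_ARM=y
CONFIG_VIRTIO_RTC_CLASS=y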


@@ -14,3 +14,8 @@ obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
obj-$(CONFIG_VIRTIO_DMA_SHARED_BUFFER) += virtio_dma_buf.o
obj-$(CONFIG_VIRTIO_DEBUG) += virtio_debug.o
obj-$(CONFIG_VIRTIO_RTC) += virtio_rtc.o
virtio_rtc-y := virtio_rtc_driver.o
virtio_rtc-$(CONFIG_VIRTIO_RTC_PTP) += virtio_rtc_ptp.o
virtio_rtc-$(CONFIG_VIRTIO_RTC_ARM) += virtio_rtc_arm.o
virtio_rtc-$(CONFIG_VIRTIO_RTC_CLASS) += virtio_rtc_class.o


@@ -48,6 +48,7 @@ void vp_modern_avq_done(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
unsigned int status_size = sizeof(struct virtio_admin_cmd_status);
struct virtio_admin_cmd *cmd;
unsigned long flags;
unsigned int len;
@@ -56,7 +57,17 @@ void vp_modern_avq_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((cmd = virtqueue_get_buf(vq, &len))) {
cmd->result_sg_size = len;
/* If the number of bytes written by the device is less
* than the size of struct virtio_admin_cmd_status, the
* remaining status bytes will remain zero-initialized,
* since the buffer was zeroed during allocation.
* In this case, set the size of command_specific_result
* to 0.
*/
if (len < status_size)
cmd->result_sg_size = 0;
else
cmd->result_sg_size = len - status_size;
complete(&cmd->completion);
}
} while (!virtqueue_enable_cb(vq));
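
A worked example of the new result size computation (illustrative; assumes sizeof(struct virtio_admin_cmd_status) is 8 bytes): if the device reports len = 24 bytes written, result_sg_size becomes 24 - 8 = 16 bytes of command-specific result; if the device writes fewer than 8 bytes, the remaining status bytes stay zero-initialized and result_sg_size is set to 0.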


@@ -0,0 +1,23 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Provides cross-timestamp params for Arm.
*
* Copyright (C) 2022-2023 OpenSynergy GmbH
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clocksource_ids.h>
#include <uapi/linux/virtio_rtc.h>
#include "virtio_rtc_internal.h"
/* see header for doc */
int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
{
*hw_counter = VIRTIO_RTC_COUNTER_ARM_VCT;
*cs_id = CSID_ARM_ARCH_COUNTER;
return 0;
}


@@ -0,0 +1,262 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* virtio_rtc RTC class driver
*
* Copyright (C) 2023 OpenSynergy GmbH
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/math64.h>
#include <linux/overflow.h>
#include <linux/rtc.h>
#include <linux/time64.h>
#include <uapi/linux/virtio_rtc.h>
#include "virtio_rtc_internal.h"
/**
* struct viortc_class - RTC class wrapper
* @viortc: virtio_rtc device data
* @rtc: RTC device
* @vio_clk_id: virtio_rtc clock id
* @stopped: Whether RTC ops are disallowed. Access protected by rtc_lock().
*/
struct viortc_class {
struct viortc_dev *viortc;
struct rtc_device *rtc;
u16 vio_clk_id;
bool stopped;
};
/**
* viortc_class_get_locked() - get RTC class wrapper, if ops allowed
* @dev: virtio device
*
* Gets the RTC class wrapper from the virtio device, if it is available and
* ops are allowed.
*
* Context: Caller must hold rtc_lock().
* Return: RTC class wrapper if available and ops allowed, ERR_PTR otherwise.
*/
static struct viortc_class *viortc_class_get_locked(struct device *dev)
{
struct viortc_class *viortc_class;
viortc_class = viortc_class_from_dev(dev);
if (IS_ERR(viortc_class))
return viortc_class;
if (viortc_class->stopped)
return ERR_PTR(-EBUSY);
return viortc_class;
}
/**
* viortc_class_read_time() - RTC class op read_time
* @dev: virtio device
* @tm: read time
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
static int viortc_class_read_time(struct device *dev, struct rtc_time *tm)
{
struct viortc_class *viortc_class;
time64_t sec;
int ret;
u64 ns;
viortc_class = viortc_class_get_locked(dev);
if (IS_ERR(viortc_class))
return PTR_ERR(viortc_class);
ret = viortc_read(viortc_class->viortc, viortc_class->vio_clk_id, &ns);
if (ret)
return ret;
sec = div_u64(ns, NSEC_PER_SEC);
rtc_time64_to_tm(sec, tm);
return 0;
}
/**
* viortc_class_read_alarm() - RTC class op read_alarm
* @dev: virtio device
* @alrm: alarm read out
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
static int viortc_class_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct viortc_class *viortc_class;
time64_t alarm_time_sec;
u64 alarm_time_ns;
bool enabled;
int ret;
viortc_class = viortc_class_get_locked(dev);
if (IS_ERR(viortc_class))
return PTR_ERR(viortc_class);
ret = viortc_read_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
&alarm_time_ns, &enabled);
if (ret)
return ret;
alarm_time_sec = div_u64(alarm_time_ns, NSEC_PER_SEC);
rtc_time64_to_tm(alarm_time_sec, &alrm->time);
alrm->enabled = enabled;
return 0;
}
/**
* viortc_class_set_alarm() - RTC class op set_alarm
* @dev: virtio device
* @alrm: alarm to set
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
static int viortc_class_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct viortc_class *viortc_class;
time64_t alarm_time_sec;
u64 alarm_time_ns;
viortc_class = viortc_class_get_locked(dev);
if (IS_ERR(viortc_class))
return PTR_ERR(viortc_class);
alarm_time_sec = rtc_tm_to_time64(&alrm->time);
if (alarm_time_sec < 0)
return -EINVAL;
if (check_mul_overflow((u64)alarm_time_sec, (u64)NSEC_PER_SEC,
&alarm_time_ns))
return -EINVAL;
return viortc_set_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
alarm_time_ns, alrm->enabled);
}
/**
* viortc_class_alarm_irq_enable() - RTC class op alarm_irq_enable
* @dev: virtio device
* @enabled: enable or disable alarm IRQ
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
static int viortc_class_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct viortc_class *viortc_class;
viortc_class = viortc_class_get_locked(dev);
if (IS_ERR(viortc_class))
return PTR_ERR(viortc_class);
return viortc_set_alarm_enabled(viortc_class->viortc,
viortc_class->vio_clk_id, enabled);
}
static const struct rtc_class_ops viortc_class_ops = {
.read_time = viortc_class_read_time,
.read_alarm = viortc_class_read_alarm,
.set_alarm = viortc_class_set_alarm,
.alarm_irq_enable = viortc_class_alarm_irq_enable,
};
/**
* viortc_class_alarm() - propagate alarm notification as alarm interrupt
* @viortc_class: RTC class wrapper
* @vio_clk_id: virtio_rtc clock id
*
* Context: Any context.
*/
void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id)
{
if (vio_clk_id != viortc_class->vio_clk_id) {
dev_warn_ratelimited(&viortc_class->rtc->dev,
"ignoring alarm for clock id %d, expected id %d\n",
vio_clk_id, viortc_class->vio_clk_id);
return;
}
rtc_update_irq(viortc_class->rtc, 1, RTC_AF | RTC_IRQF);
}
/**
* viortc_class_stop() - disallow RTC class ops
* @viortc_class: RTC class wrapper
*
* Context: Process context. Caller must NOT hold rtc_lock().
*/
void viortc_class_stop(struct viortc_class *viortc_class)
{
rtc_lock(viortc_class->rtc);
viortc_class->stopped = true;
rtc_unlock(viortc_class->rtc);
}
/**
* viortc_class_register() - register RTC class device
* @viortc_class: RTC class wrapper
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
int viortc_class_register(struct viortc_class *viortc_class)
{
return devm_rtc_register_device(viortc_class->rtc);
}
/**
* viortc_class_init() - init RTC class wrapper and device
* @viortc: device data
* @vio_clk_id: virtio_rtc clock id
* @have_alarm: have alarm feature
* @parent_dev: virtio device
*
* Context: Process context.
* Return: RTC class wrapper on success, ERR_PTR otherwise.
*/
struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
u16 vio_clk_id, bool have_alarm,
struct device *parent_dev)
{
struct viortc_class *viortc_class;
struct rtc_device *rtc;
viortc_class =
devm_kzalloc(parent_dev, sizeof(*viortc_class), GFP_KERNEL);
if (!viortc_class)
return ERR_PTR(-ENOMEM);
rtc = devm_rtc_allocate_device(parent_dev);
if (IS_ERR(rtc))
return ERR_CAST(rtc);
viortc_class->viortc = viortc;
viortc_class->rtc = rtc;
viortc_class->vio_clk_id = vio_clk_id;
if (!have_alarm)
clear_bit(RTC_FEATURE_ALARM, rtc->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
rtc->ops = &viortc_class_ops;
rtc->range_max = div_u64(U64_MAX, NSEC_PER_SEC);
return viortc_class;
}

File diff suppressed because it is too large.


@@ -0,0 +1,122 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* virtio_rtc internal interfaces
*
* Copyright (C) 2022-2023 OpenSynergy GmbH
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _VIRTIO_RTC_INTERNAL_H_
#define _VIRTIO_RTC_INTERNAL_H_
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
/* driver core IFs */
struct viortc_dev;
int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading);
int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
u64 *reading, u64 *cycles);
int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
bool *supported);
int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
u64 *alarm_time, bool *enabled);
int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
bool alarm_enable);
int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
bool alarm_enable);
struct viortc_class;
struct viortc_class *viortc_class_from_dev(struct device *dev);
/* PTP IFs */
struct viortc_ptp_clock;
#if IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)
struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
struct device *parent_dev,
u16 vio_clk_id,
const char *ptp_clock_name);
int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
struct device *parent_dev);
#else
static inline struct viortc_ptp_clock *
viortc_ptp_register(struct viortc_dev *viortc, struct device *parent_dev,
u16 vio_clk_id, const char *ptp_clock_name)
{
return NULL;
}
static inline int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
struct device *parent_dev)
{
return -ENODEV;
}
#endif
/* HW counter IFs */
/**
* viortc_hw_xtstamp_params() - get HW-specific xtstamp params
* @hw_counter: virtio_rtc HW counter type
* @cs_id: clocksource id corresponding to hw_counter
*
* Gets the HW-specific xtstamp params. Returns an error if the driver cannot
* support xtstamp.
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id);
/* RTC class IFs */
#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id);
void viortc_class_stop(struct viortc_class *viortc_class);
int viortc_class_register(struct viortc_class *viortc_class);
struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
u16 vio_clk_id, bool have_alarm,
struct device *parent_dev);
#else /* CONFIG_VIRTIO_RTC_CLASS */
static inline void viortc_class_alarm(struct viortc_class *viortc_class,
u16 vio_clk_id)
{
}
static inline void viortc_class_stop(struct viortc_class *viortc_class)
{
}
static inline int viortc_class_register(struct viortc_class *viortc_class)
{
return -ENODEV;
}
static inline struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
u16 vio_clk_id,
bool have_alarm,
struct device *parent_dev)
{
return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_VIRTIO_RTC_CLASS */
#endif /* _VIRTIO_RTC_INTERNAL_H_ */


@@ -0,0 +1,347 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Expose virtio_rtc clocks as PTP clocks.
*
* Copyright (C) 2022-2023 OpenSynergy GmbH
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Derived from ptp_kvm_common.c, virtual PTP 1588 clock for use with KVM
* guests.
*
* Copyright (C) 2017 Red Hat Inc.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>
#include <uapi/linux/virtio_rtc.h>
#include "virtio_rtc_internal.h"
/**
* struct viortc_ptp_clock - PTP clock abstraction
* @ptp_clock: PTP clock handle for unregistering
* @viortc: virtio_rtc device data
* @ptp_info: PTP clock description
* @vio_clk_id: virtio_rtc clock id
* @have_cross: device supports crosststamp with available HW counter
*/
struct viortc_ptp_clock {
struct ptp_clock *ptp_clock;
struct viortc_dev *viortc;
struct ptp_clock_info ptp_info;
u16 vio_clk_id;
bool have_cross;
};
/**
* struct viortc_ptp_cross_ctx - context for get_device_system_crosststamp()
* @device_time: device clock reading
* @system_counterval: HW counter value at device_time
*
* Provides the already obtained crosststamp to get_device_system_crosststamp().
*/
struct viortc_ptp_cross_ctx {
ktime_t device_time;
struct system_counterval_t system_counterval;
};
/* Weak function in case get_device_system_crosststamp() is not supported */
int __weak viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
{
return -EOPNOTSUPP;
}
/**
* viortc_ptp_get_time_fn() - callback for get_device_system_crosststamp()
* @device_time: device clock reading
* @system_counterval: HW counter value at device_time
* @ctx: context with already obtained crosststamp
*
* Return: zero (success).
*/
static int viortc_ptp_get_time_fn(ktime_t *device_time,
struct system_counterval_t *system_counterval,
void *ctx)
{
struct viortc_ptp_cross_ctx *vio_ctx = ctx;
*device_time = vio_ctx->device_time;
*system_counterval = vio_ctx->system_counterval;
return 0;
}
/**
* viortc_ptp_do_xtstamp() - get crosststamp from device
* @vio_ptp: virtio_rtc PTP clock
* @hw_counter: virtio_rtc HW counter type
* @cs_id: clocksource id corresponding to hw_counter
* @ctx: context for get_device_system_crosststamp()
*
* Reads HW-specific crosststamp from device.
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
static int viortc_ptp_do_xtstamp(struct viortc_ptp_clock *vio_ptp,
u8 hw_counter, enum clocksource_ids cs_id,
struct viortc_ptp_cross_ctx *ctx)
{
u64 max_ns, ns;
int ret;
ctx->system_counterval.cs_id = cs_id;
ret = viortc_read_cross(vio_ptp->viortc, vio_ptp->vio_clk_id,
hw_counter, &ns,
&ctx->system_counterval.cycles);
if (ret)
return ret;
max_ns = (u64)ktime_to_ns(KTIME_MAX);
if (ns > max_ns)
return -EINVAL;
ctx->device_time = ns_to_ktime(ns);
return 0;
}
/*
* PTP clock operations
*/
/**
* viortc_ptp_getcrosststamp() - PTP clock getcrosststamp op
* @ptp: PTP clock info
* @xtstamp: crosststamp
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
static int viortc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct system_device_crosststamp *xtstamp)
{
struct viortc_ptp_clock *vio_ptp =
container_of(ptp, struct viortc_ptp_clock, ptp_info);
struct system_time_snapshot history_begin;
struct viortc_ptp_cross_ctx ctx;
enum clocksource_ids cs_id;
u8 hw_counter;
int ret;
if (!vio_ptp->have_cross)
return -EOPNOTSUPP;
ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
if (ret)
return ret;
ktime_get_snapshot(&history_begin);
if (history_begin.cs_id != cs_id)
return -EOPNOTSUPP;
/*
* Getting the timestamp can take many milliseconds with a slow Virtio
* device. This is too long for viortc_ptp_get_time_fn() passed to
* get_device_system_crosststamp(), which usually has to return before
* the timekeeper seqcount increases (every tick or so).
*
* So, get the actual cross-timestamp first.
*/
ret = viortc_ptp_do_xtstamp(vio_ptp, hw_counter, cs_id, &ctx);
if (ret)
return ret;
ret = get_device_system_crosststamp(viortc_ptp_get_time_fn, &ctx,
&history_begin, xtstamp);
if (ret)
pr_debug("%s: get_device_system_crosststamp() returned %d\n",
__func__, ret);
return ret;
}
/* viortc_ptp_adjfine() - unsupported PTP clock adjfine op */
static int viortc_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
return -EOPNOTSUPP;
}
/* viortc_ptp_adjtime() - unsupported PTP clock adjtime op */
static int viortc_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
return -EOPNOTSUPP;
}
/* viortc_ptp_settime64() - unsupported PTP clock settime64 op */
static int viortc_ptp_settime64(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
return -EOPNOTSUPP;
}
/*
* viortc_ptp_gettimex64() - PTP clock gettimex64 op
*
* Context: Process context.
*/
static int viortc_ptp_gettimex64(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct viortc_ptp_clock *vio_ptp =
container_of(ptp, struct viortc_ptp_clock, ptp_info);
int ret;
u64 ns;
ptp_read_system_prets(sts);
ret = viortc_read(vio_ptp->viortc, vio_ptp->vio_clk_id, &ns);
ptp_read_system_postts(sts);
if (ret)
return ret;
if (ns > (u64)S64_MAX)
return -EINVAL;
*ts = ns_to_timespec64((s64)ns);
return 0;
}
/* viortc_ptp_enable() - unsupported PTP clock enable op */
static int viortc_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
/*
* viortc_ptp_info_template - ptp_clock_info template
*
* The .name member will be set for individual virtio_rtc PTP clocks.
*
* The .getcrosststamp member will be cleared for PTP clocks not supporting
* crosststamp.
*/
static const struct ptp_clock_info viortc_ptp_info_template = {
.owner = THIS_MODULE,
/* .name is set according to clock type */
.adjfine = viortc_ptp_adjfine,
.adjtime = viortc_ptp_adjtime,
.gettimex64 = viortc_ptp_gettimex64,
.settime64 = viortc_ptp_settime64,
.enable = viortc_ptp_enable,
.getcrosststamp = viortc_ptp_getcrosststamp,
};
/**
* viortc_ptp_unregister() - PTP clock unregistering wrapper
* @vio_ptp: virtio_rtc PTP clock
* @parent_dev: parent device of PTP clock
*
* Return: Zero on success, negative error code otherwise.
*/
int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
struct device *parent_dev)
{
int ret = ptp_clock_unregister(vio_ptp->ptp_clock);
if (!ret)
devm_kfree(parent_dev, vio_ptp);
return ret;
}
/**
* viortc_ptp_get_cross_cap() - get xtstamp support info from device
* @viortc: virtio_rtc device data
* @vio_ptp: virtio_rtc PTP clock abstraction
*
* Context: Process context.
* Return: Zero on success, negative error code otherwise.
*/
static int viortc_ptp_get_cross_cap(struct viortc_dev *viortc,
struct viortc_ptp_clock *vio_ptp)
{
enum clocksource_ids cs_id;
bool xtstamp_supported;
u8 hw_counter;
int ret;
ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
if (ret) {
vio_ptp->have_cross = false;
return 0;
}
ret = viortc_cross_cap(viortc, vio_ptp->vio_clk_id, hw_counter,
&xtstamp_supported);
if (ret)
return ret;
vio_ptp->have_cross = xtstamp_supported;
return 0;
}
/**
* viortc_ptp_register() - prepare and register PTP clock
* @viortc: virtio_rtc device data
* @parent_dev: parent device for PTP clock
* @vio_clk_id: id of virtio_rtc clock which backs PTP clock
* @ptp_clock_name: PTP clock name
*
* Context: Process context.
* Return: Pointer on success, ERR_PTR() otherwise; NULL if PTP clock support
* not available.
*/
struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
struct device *parent_dev,
u16 vio_clk_id,
const char *ptp_clock_name)
{
struct viortc_ptp_clock *vio_ptp;
struct ptp_clock *ptp_clock;
ssize_t len;
int ret;
vio_ptp = devm_kzalloc(parent_dev, sizeof(*vio_ptp), GFP_KERNEL);
if (!vio_ptp)
return ERR_PTR(-ENOMEM);
vio_ptp->viortc = viortc;
vio_ptp->vio_clk_id = vio_clk_id;
vio_ptp->ptp_info = viortc_ptp_info_template;
len = strscpy(vio_ptp->ptp_info.name, ptp_clock_name,
sizeof(vio_ptp->ptp_info.name));
if (len < 0) {
ret = len;
goto err_free_dev;
}
ret = viortc_ptp_get_cross_cap(viortc, vio_ptp);
if (ret)
goto err_free_dev;
if (!vio_ptp->have_cross)
vio_ptp->ptp_info.getcrosststamp = NULL;
ptp_clock = ptp_clock_register(&vio_ptp->ptp_info, parent_dev);
if (IS_ERR(ptp_clock))
goto err_on_register;
vio_ptp->ptp_clock = ptp_clock;
return vio_ptp;
err_on_register:
ret = PTR_ERR(ptp_clock);
err_free_dev:
devm_kfree(parent_dev, vio_ptp);
return ERR_PTR(ret);
}

View File

@@ -329,6 +329,8 @@ static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
struct virtio_shm_region *region, u8 id)
{
if (!region->len)
return false;
if (!vdev->config->get_shm_region)
return false;
return vdev->config->get_shm_region(vdev, region, id);


@@ -0,0 +1,237 @@
/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* Copyright (C) 2022-2024 OpenSynergy GmbH
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _LINUX_VIRTIO_RTC_H
#define _LINUX_VIRTIO_RTC_H
#include <linux/types.h>
/* alarm feature */
#define VIRTIO_RTC_F_ALARM 0
/* read request message types */
#define VIRTIO_RTC_REQ_READ 0x0001
#define VIRTIO_RTC_REQ_READ_CROSS 0x0002
/* control request message types */
#define VIRTIO_RTC_REQ_CFG 0x1000
#define VIRTIO_RTC_REQ_CLOCK_CAP 0x1001
#define VIRTIO_RTC_REQ_CROSS_CAP 0x1002
#define VIRTIO_RTC_REQ_READ_ALARM 0x1003
#define VIRTIO_RTC_REQ_SET_ALARM 0x1004
#define VIRTIO_RTC_REQ_SET_ALARM_ENABLED 0x1005
/* alarmq message types */
#define VIRTIO_RTC_NOTIF_ALARM 0x2000
/* Message headers */
/** common request header */
struct virtio_rtc_req_head {
__le16 msg_type;
__u8 reserved[6];
};
/** common response header */
struct virtio_rtc_resp_head {
#define VIRTIO_RTC_S_OK 0
#define VIRTIO_RTC_S_EOPNOTSUPP 2
#define VIRTIO_RTC_S_ENODEV 3
#define VIRTIO_RTC_S_EINVAL 4
#define VIRTIO_RTC_S_EIO 5
__u8 status;
__u8 reserved[7];
};
/** common notification header */
struct virtio_rtc_notif_head {
__le16 msg_type;
__u8 reserved[6];
};
/* read requests */
/* VIRTIO_RTC_REQ_READ message */
struct virtio_rtc_req_read {
struct virtio_rtc_req_head head;
__le16 clock_id;
__u8 reserved[6];
};
struct virtio_rtc_resp_read {
struct virtio_rtc_resp_head head;
__le64 clock_reading;
};
/* VIRTIO_RTC_REQ_READ_CROSS message */
struct virtio_rtc_req_read_cross {
struct virtio_rtc_req_head head;
__le16 clock_id;
/* Arm Generic Timer Counter-timer Virtual Count Register (CNTVCT_EL0) */
#define VIRTIO_RTC_COUNTER_ARM_VCT 0
/* x86 Time-Stamp Counter */
#define VIRTIO_RTC_COUNTER_X86_TSC 1
/* Invalid */
#define VIRTIO_RTC_COUNTER_INVALID 0xFF
__u8 hw_counter;
__u8 reserved[5];
};
struct virtio_rtc_resp_read_cross {
struct virtio_rtc_resp_head head;
__le64 clock_reading;
__le64 counter_cycles;
};
/* control requests */
/* VIRTIO_RTC_REQ_CFG message */
struct virtio_rtc_req_cfg {
struct virtio_rtc_req_head head;
/* no request params */
};
struct virtio_rtc_resp_cfg {
struct virtio_rtc_resp_head head;
/** # of clocks -> clock ids < num_clocks are valid */
__le16 num_clocks;
__u8 reserved[6];
};
/* VIRTIO_RTC_REQ_CLOCK_CAP message */
struct virtio_rtc_req_clock_cap {
struct virtio_rtc_req_head head;
__le16 clock_id;
__u8 reserved[6];
};
struct virtio_rtc_resp_clock_cap {
struct virtio_rtc_resp_head head;
#define VIRTIO_RTC_CLOCK_UTC 0
#define VIRTIO_RTC_CLOCK_TAI 1
#define VIRTIO_RTC_CLOCK_MONOTONIC 2
#define VIRTIO_RTC_CLOCK_UTC_SMEARED 3
#define VIRTIO_RTC_CLOCK_UTC_MAYBE_SMEARED 4
__u8 type;
#define VIRTIO_RTC_SMEAR_UNSPECIFIED 0
#define VIRTIO_RTC_SMEAR_NOON_LINEAR 1
#define VIRTIO_RTC_SMEAR_UTC_SLS 2
__u8 leap_second_smearing;
#define VIRTIO_RTC_FLAG_ALARM_CAP (1 << 0)
__u8 flags;
__u8 reserved[5];
};
/* VIRTIO_RTC_REQ_CROSS_CAP message */
struct virtio_rtc_req_cross_cap {
struct virtio_rtc_req_head head;
__le16 clock_id;
__u8 hw_counter;
__u8 reserved[5];
};
struct virtio_rtc_resp_cross_cap {
struct virtio_rtc_resp_head head;
#define VIRTIO_RTC_FLAG_CROSS_CAP (1 << 0)
__u8 flags;
__u8 reserved[7];
};
/* VIRTIO_RTC_REQ_READ_ALARM message */
struct virtio_rtc_req_read_alarm {
struct virtio_rtc_req_head head;
__le16 clock_id;
__u8 reserved[6];
};
struct virtio_rtc_resp_read_alarm {
struct virtio_rtc_resp_head head;
__le64 alarm_time;
#define VIRTIO_RTC_FLAG_ALARM_ENABLED (1 << 0)
__u8 flags;
__u8 reserved[7];
};
/* VIRTIO_RTC_REQ_SET_ALARM message */
struct virtio_rtc_req_set_alarm {
struct virtio_rtc_req_head head;
__le64 alarm_time;
__le16 clock_id;
/* flag VIRTIO_RTC_FLAG_ALARM_ENABLED */
__u8 flags;
__u8 reserved[5];
};
struct virtio_rtc_resp_set_alarm {
struct virtio_rtc_resp_head head;
/* no response params */
};
/* VIRTIO_RTC_REQ_SET_ALARM_ENABLED message */
struct virtio_rtc_req_set_alarm_enabled {
struct virtio_rtc_req_head head;
__le16 clock_id;
/* flag VIRTIO_RTC_FLAG_ALARM_ENABLED */
__u8 flags;
__u8 reserved[5];
};
struct virtio_rtc_resp_set_alarm_enabled {
struct virtio_rtc_resp_head head;
/* no response params */
};
/** Union of request types for requestq */
union virtio_rtc_req_requestq {
struct virtio_rtc_req_read read;
struct virtio_rtc_req_read_cross read_cross;
struct virtio_rtc_req_cfg cfg;
struct virtio_rtc_req_clock_cap clock_cap;
struct virtio_rtc_req_cross_cap cross_cap;
struct virtio_rtc_req_read_alarm read_alarm;
struct virtio_rtc_req_set_alarm set_alarm;
struct virtio_rtc_req_set_alarm_enabled set_alarm_enabled;
};
/** Union of response types for requestq */
union virtio_rtc_resp_requestq {
struct virtio_rtc_resp_read read;
struct virtio_rtc_resp_read_cross read_cross;
struct virtio_rtc_resp_cfg cfg;
struct virtio_rtc_resp_clock_cap clock_cap;
struct virtio_rtc_resp_cross_cap cross_cap;
struct virtio_rtc_resp_read_alarm read_alarm;
struct virtio_rtc_resp_set_alarm set_alarm;
struct virtio_rtc_resp_set_alarm_enabled set_alarm_enabled;
};
/* alarmq notifications */
/* VIRTIO_RTC_NOTIF_ALARM notification */
struct virtio_rtc_notif_alarm {
struct virtio_rtc_notif_head head;
__le16 clock_id;
__u8 reserved[6];
};
/** Union of notification types for alarmq */
union virtio_rtc_notif_alarmq {
struct virtio_rtc_notif_alarm alarm;
};
#endif /* _LINUX_VIRTIO_RTC_H */
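
To illustrate the wire format defined above, a sketch only (not from the patch) of filling a VIRTIO_RTC_REQ_READ request for one clock and picking the reading out of its response; it assumes kernel context with the usual byte-order and string helpers, and elides queueing the buffers on the requestq:

/*
 * Illustrative sketch: build a VIRTIO_RTC_REQ_READ request and extract
 * the clock reading from the matching response.
 */
static int example_read_clock(struct virtio_rtc_req_read *req,
			      const struct virtio_rtc_resp_read *resp,
			      u16 clock_id, u64 *reading)
{
	memset(req, 0, sizeof(*req));
	req->head.msg_type = cpu_to_le16(VIRTIO_RTC_REQ_READ);
	req->clock_id = cpu_to_le16(clock_id);

	/* ... submit req/resp on the requestq and wait for the device ... */

	if (resp->head.status != VIRTIO_RTC_S_OK)
		return -EIO;

	*reading = le64_to_cpu(resp->clock_reading);
	return 0;
}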