Merge tag 'cxl-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull cxl updates from Dan Williams:
"Compute Express Link (CXL) updates for 6.0:
- Introduce a 'struct cxl_region' object with support for
provisioning and assembling persistent memory regions.
- Introduce alloc_free_mem_region() to accompany the existing
request_free_mem_region() as a method to allocate physical memory
capacity out of an existing resource.
- Export insert_resource_expand_to_fit() for the CXL subsystem to
late-publish CXL platform windows in iomem_resource.
- Add a polled mode PCI DOE (Data Object Exchange) driver service and
use it in cxl_pci to retrieve the CDAT (Coherent Device Attribute
Table)"
* tag 'cxl-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (74 commits)
cxl/hdm: Fix skip allocations vs multiple pmem allocations
cxl/region: Disallow region granularity != window granularity
cxl/region: Fix x1 interleave to greater than x1 interleave routing
cxl/region: Move HPA setup to cxl_region_attach()
cxl/region: Fix decoder interleave programming
Documentation: cxl: remove dangling kernel-doc reference
cxl/region: describe targets and nr_targets members of cxl_region_params
cxl/regions: add padding for cxl_rr_ep_add nested lists
cxl/region: Fix IS_ERR() vs NULL check
cxl/region: Fix region reference target accounting
cxl/region: Fix region commit uninitialized variable warning
cxl/region: Fix port setup uninitialized variable warnings
cxl/region: Stop initializing interleave granularity
cxl/hdm: Fix DPA reservation vs cxl_endpoint_decoder lifetime
cxl/acpi: Minimize granularity for x1 interleaves
cxl/region: Delete 'region' attribute from root decoders
cxl/acpi: Autoload driver for 'cxl_acpi' test devices
cxl/region: decrement ->nr_targets on error in cxl_region_attach()
cxl/region: prevent underflow in ways_to_cxl()
cxl/region: uninitialized variable in alloc_hpa()
...
include/linux/ioport.h
@@ -141,6 +141,7 @@ enum {
 	IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
 	IORES_DESC_RESERVED = 7,
 	IORES_DESC_SOFT_RESERVED = 8,
+	IORES_DESC_CXL = 9,
 };
 
 /*
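The new IORES_DESC_CXL descriptor pairs with the newly exported insert_resource_expand_to_fit() mentioned in the pull message: it lets cxl_acpi late-publish a CXL platform window in iomem_resource after boot. A minimal sketch of that pattern follows; the helper name and call site are hypothetical, only IORES_DESC_CXL and insert_resource_expand_to_fit() come from this pull:

#include <linux/ioport.h>
#include <linux/slab.h>

/* Hypothetical helper: late-publish a CXL window in iomem_resource,
 * tagged IORES_DESC_CXL so other subsystems can recognize the range. */
static struct resource *publish_cxl_window(u64 start, u64 size)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = "CXL Window";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_CXL;

	/* Insert the window, expanding it to absorb any ranges that
	 * were already registered inside it. */
	insert_resource_expand_to_fit(&iomem_resource, res);
	return res;
}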
include/linux/ioport.h
@@ -329,6 +330,8 @@ struct resource *devm_request_free_mem_region(struct device *dev,
 		struct resource *base, unsigned long size);
 struct resource *request_free_mem_region(struct resource *base,
 		unsigned long size, const char *name);
+struct resource *alloc_free_mem_region(struct resource *base,
+		unsigned long size, unsigned long align, const char *name);
 
 static inline void irqresource_disabled(struct resource *res, u32 irq)
 {
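alloc_free_mem_region() differs from request_free_mem_region() in that it allocates capacity out of a resource the caller already owns rather than out of iomem_resource, which is what region provisioning needs when carving host physical address (HPA) space from a CXL window. A sketch of a caller, with illustrative names and sizes:

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/sizes.h>

/* Hypothetical caller: carve an aligned 256MB span of host physical
 * address space out of an existing parent window resource. */
static struct resource *carve_hpa(struct resource *window)
{
	struct resource *res;

	res = alloc_free_mem_region(window, SZ_256M, SZ_256M, "region0");
	if (IS_ERR(res))
		return res;	/* e.g. no fit found in the window */

	return res;
}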
include/linux/libnvdimm.h
@@ -59,6 +59,9 @@ enum {
 	/* Platform provides asynchronous flush mechanism */
 	ND_REGION_ASYNC = 3,
 
+	/* Region was created by CXL subsystem */
+	ND_REGION_CXL = 4,
+
 	/* mark newly adjusted resources as requiring a label update */
 	DPA_RESOURCE_ADJUSTED = 1 << 0,
 };
@@ -122,6 +125,7 @@ struct nd_region_desc {
 	int numa_node;
 	int target_node;
 	unsigned long flags;
+	int memregion;
 	struct device_node *of_node;
 	int (*flush)(struct nd_region *nd_region, struct bio *bio);
 };
@@ -259,6 +263,7 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 			cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
 }
 void nvdimm_delete(struct nvdimm *nvdimm);
+void nvdimm_region_delete(struct nd_region *nd_region);
 
 const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
 const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
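On the nvdimm side, ND_REGION_CXL marks regions created by the CXL subsystem, the new memregion field carries an id to release on teardown, and nvdimm_region_delete() gives CXL a way to unregister a region it created. A sketch of how a bridge might fill the descriptor; the helper and values are illustrative, not the pull's actual cxl_pmem code:

#include <linux/bits.h>
#include <linux/libnvdimm.h>
#include <linux/numa.h>

/* Hypothetical bridge code: describe a CXL-backed pmem region to the
 * nvdimm core, flagged so it can be told apart from firmware-defined
 * regions and torn down later with nvdimm_region_delete(). */
static struct nd_region *cxl_pmem_region_register(struct nvdimm_bus *bus,
						  struct resource *res,
						  int id)
{
	struct nd_region_desc desc = {
		.res = res,
		.numa_node = NUMA_NO_NODE,
		.target_node = NUMA_NO_NODE,
		.flags = BIT(ND_REGION_CXL),
		.memregion = id,	/* released when region is deleted */
	};

	return nvdimm_pmem_region_create(bus, &desc);
}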
include/linux/pci-doe.h (new file, 77 lines)
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ *	Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ *	Ira Weiny <ira.weiny@intel.com>
+ */
+
+#ifndef LINUX_PCI_DOE_H
+#define LINUX_PCI_DOE_H
+
+struct pci_doe_protocol {
+	u16 vid;
+	u8 type;
+};
+
+struct pci_doe_mb;
+
+/**
+ * struct pci_doe_task - represents a single query/response
+ *
+ * @prot: DOE Protocol
+ * @request_pl: The request payload
+ * @request_pl_sz: Size of the request payload (bytes)
+ * @response_pl: The response payload
+ * @response_pl_sz: Size of the response payload (bytes)
+ * @rv: Return value.  Length of received response or error (bytes)
+ * @complete: Called when task is complete
+ * @private: Private data for the consumer
+ * @work: Used internally by the mailbox
+ * @doe_mb: Used internally by the mailbox
+ *
+ * The payload sizes and rv are specified in bytes with the following
+ * restrictions concerning the protocol.
+ *
+ *	1) The request_pl_sz must be a multiple of double words (4 bytes)
+ *	2) The response_pl_sz must be >= a single double word (4 bytes)
+ *	3) rv is returned as bytes but it will be a multiple of double words
+ *
+ * NOTE there is no need for the caller to initialize work or doe_mb.
+ */
+struct pci_doe_task {
+	struct pci_doe_protocol prot;
+	u32 *request_pl;
+	size_t request_pl_sz;
+	u32 *response_pl;
+	size_t response_pl_sz;
+	int rv;
+	void (*complete)(struct pci_doe_task *task);
+	void *private;
+
+	/* No need for the user to initialize these fields */
+	struct work_struct work;
+	struct pci_doe_mb *doe_mb;
+};
+
+/**
+ * pci_doe_for_each_off - Iterate each DOE capability
+ * @pdev: struct pci_dev to iterate
+ * @off: u16 of config space offset of each mailbox capability found
+ */
+#define pci_doe_for_each_off(pdev, off) \
+	for (off = pci_find_next_ext_capability(pdev, off, \
+					PCI_EXT_CAP_ID_DOE); \
+		off > 0; \
+		off = pci_find_next_ext_capability(pdev, off, \
+					PCI_EXT_CAP_ID_DOE))
+
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task);
+
+#endif
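Taken together, the API is: walk the DOE extended capabilities with pci_doe_for_each_off(), create a mailbox with pcim_doe_create_mb(), check the protocol with pci_doe_supports_prot(), then submit a pci_doe_task whose completion callback fires when the exchange finishes. Below is a sketch of a synchronous wrapper in the spirit of cxl_pci's CDAT retrieval; the wrapper itself is hypothetical:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>

static void doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

/* Hypothetical wrapper: find a mailbox speaking vid/type and run one
 * request/response exchange through it, waiting for completion. The
 * buffers must follow the dword rules documented in pci-doe.h. */
static int doe_exchange(struct pci_dev *pdev, u16 vid, u8 type,
			u32 *req, size_t req_sz, u32 *rsp, size_t rsp_sz)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct pci_doe_mb *mb, *found = NULL;
	struct pci_doe_task task = {
		.prot = { .vid = vid, .type = type },
		.request_pl = req,
		.request_pl_sz = req_sz,
		.response_pl = rsp,
		.response_pl_sz = rsp_sz,
		.complete = doe_task_complete,
		.private = &done,
	};
	u16 off = 0;
	int rc;

	pci_doe_for_each_off(pdev, off) {
		mb = pcim_doe_create_mb(pdev, off);
		if (!IS_ERR(mb) && pci_doe_supports_prot(mb, vid, type)) {
			found = mb;
			break;
		}
	}
	if (!found)
		return -ENODEV;

	rc = pci_doe_submit_task(found, &task);
	if (rc)
		return rc;
	wait_for_completion(&done);

	return task.rv;	/* response length in bytes, or negative error */
}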
include/linux/pci_ids.h
@@ -151,6 +151,7 @@
 #define PCI_CLASS_OTHERS	0xff
 
 /* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_PCI_SIG	0x0001
 
 #define PCI_VENDOR_ID_LOONGSON	0x0014
 
include/linux/sysfs.h
@@ -235,6 +235,22 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size)
 #define BIN_ATTR_RW(_name, _size) \
 struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
 
+
+#define __BIN_ATTR_ADMIN_RO(_name, _size) { \
+	.attr = { .name = __stringify(_name), .mode = 0400 }, \
+	.read = _name##_read, \
+	.size = _size, \
+}
+
+#define __BIN_ATTR_ADMIN_RW(_name, _size) \
+	__BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size)
+
+#define BIN_ATTR_ADMIN_RO(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size)
+
+#define BIN_ATTR_ADMIN_RW(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size)
+
 struct sysfs_ops {
 	ssize_t (*show)(struct kobject *, struct attribute *, char *);
 	ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
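BIN_ATTR_ADMIN_RO/RW define binary sysfs attributes restricted to root (mode 0400/0600), useful for device tables like the CDAT that should not be world-readable. Defining one is a single macro invocation plus the _name##_read callback the macro wires up; the attribute below is a made-up example:

#include <linux/sysfs.h>

/* Hypothetical attribute "blob": BIN_ATTR_ADMIN_RO expands to a
 * struct bin_attribute bound to blob_read, created with mode 0400.
 * Size 0 means the length is not known up front. */
static ssize_t blob_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *attr, char *buf,
			 loff_t offset, size_t count)
{
	return 0;	/* a real driver copies device data into buf */
}
static BIN_ATTR_ADMIN_RO(blob, 0);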