ASoC: Phase out hybrid PCI devres

Merge series from Philipp Stanner <phasta@kernel.org>:

A year ago we put quite some work into getting PCI into better shape.
Some pci_ functions can sometimes be managed with devres, which is
obviously bad. We want to provide an obvious API, where pci_ functions
are never managed and pcim_ functions always are.

Thus, every driver that enables its device with pcim_enable_device()
must be ported to the pcim_ functions. Porting all users will later
enable us to significantly simplify parts of the PCI subsystem. See [1]
for details.

This patch series does that for sound.
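
To make the "hybrid" behaviour concrete, here is a minimal sketch of the
porting pattern. It uses a hypothetical "foo" driver rather than code from
this series, and it assumes pcim_request_all_regions() as the managed
region-request helper:

#include <linux/pci.h>

/*
 * Before: pcim_enable_device() silently turns later pci_*() calls into
 * managed ones, so the cleanup behaviour cannot be read from the code.
 */
static int foo_probe_old(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);	/* managed enable */
	if (ret)
		return ret;

	/* Looks unmanaged, but devres releases it because of the call above. */
	ret = pci_request_regions(pdev, "foo");
	if (ret)
		return ret;

	return 0;
}

/* After: everything that is managed says so explicitly through pcim_*(). */
static int foo_probe_new(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_request_all_regions(pdev, "foo");
	if (ret)
		return ret;

	return 0;
}

The point of the port is that, afterwards, whether a call is devres-managed
can be read directly from its name.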

Feel free to squash the commits as you see fit.

P.

[1] https://elixir.bootlin.com/linux/v6.14-rc4/source/drivers/pci/devres.c#L18
Committed by Mark Brown on 2025-05-06 08:42:25 +09:00
263 changed files with 3651 additions and 1510 deletions


@@ -111,7 +111,7 @@ Description: RO. Package current voltage in millivolt.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp2_input
Date: March 2025
KernelVersion: 6.14
KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RO. Package temperature in millidegree Celsius.
@@ -119,7 +119,7 @@ Description: RO. Package temperature in millidegree Celsius.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp3_input
Date: March 2025
KernelVersion: 6.14
KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RO. VRAM temperature in millidegree Celsius.


@@ -56,19 +56,18 @@ properties:
enum:
- snps,dw-apb-ssi
- snps,dwc-ssi-1.01a
- description: Microsemi Ocelot/Jaguar2 SoC SPI Controller
items:
- enum:
- mscc,ocelot-spi
- mscc,jaguar2-spi
- const: snps,dw-apb-ssi
- description: Microchip Sparx5 SoC SPI Controller
const: microchip,sparx5-spi
- description: Amazon Alpine SPI Controller
const: amazon,alpine-dw-apb-ssi
- description: Renesas RZ/N1 SPI Controller
- description: Vendor controllers which use snps,dw-apb-ssi as fallback
items:
- const: renesas,rzn1-spi
- enum:
- mscc,ocelot-spi
- mscc,jaguar2-spi
- renesas,rzn1-spi
- sophgo,sg2042-spi
- thead,th1520-spi
- const: snps,dw-apb-ssi
- description: Intel Keem Bay SPI Controller
const: intel,keembay-ssi
@@ -88,10 +87,6 @@ properties:
- renesas,r9a06g032-spi # RZ/N1D
- renesas,r9a06g033-spi # RZ/N1S
- const: renesas,rzn1-spi # RZ/N1
- description: T-HEAD TH1520 SoC SPI Controller
items:
- const: thead,th1520-spi
- const: snps,dw-apb-ssi
reg:
minItems: 1


@@ -89,8 +89,10 @@ definitions:
doc: Group of short_detected states
-
name: phy-upstream-type
enum-name:
enum-name: phy-upstream
header: linux/ethtool.h
type: enum
name-prefix: phy-upstream
entries: [ mac, phy ]
-
name: tcp-data-split


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1052,13 +1052,6 @@ NOSTDINC_FLAGS += -nostdinc
# perform bounds checking.
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
#Currently, disable -Wstringop-overflow for GCC 11, globally.
KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
#Currently, disable -Wunterminated-string-initialization as broken
KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow


@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void)
static const struct midr_range spectre_bhb_k132_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
{},
};
static const struct midr_range spectre_bhb_k38_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
{},
};
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),


@@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs)
memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) {
force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
(void __user *) regs->iaoq[0]);
return -1;
int sig = signalcode >> 24;
if (sig == SIGFPE) {
/*
* Clear floating point trap bit to avoid trapping
* again on the first floating-point instruction in
* the userspace signal handler.
*/
regs->fr[0] &= ~(1ULL << 38);
}
force_sig_fault(sig, signalcode & 0xffffff,
(void __user *) regs->iaoq[0]);
return -1;
}
return signalcode ? -1 : 0;


@@ -234,10 +234,8 @@ fi
# suppress some warnings in recent ld versions
nowarn="-z noexecstack"
if ! ld_is_lld; then
if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
nowarn="$nowarn --no-warn-rwx-segments"
fi
if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
nowarn="$nowarn --no-warn-rwx-segments"
fi
platformo=$object/"$platform".o


@@ -258,10 +258,6 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
break;
}
}
if (i == hdr->e_shnum) {
pr_err("%s: doesn't contain __patchable_function_entries.\n", me->name);
return -ENOEXEC;
}
#endif
pr_debug("Looks like a total of %lu stubs, max\n", relocs);


@@ -976,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
return 0;
}
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
{
if (radix_enabled())
@@ -984,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
return false;
}
#endif
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
unsigned long addr, unsigned long next)
@@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
pmd_t *pmd;
pte_t *pte;
/*
* Make sure we align the start vmemmap addr so that we calculate
* the correct start_pfn in altmap boundary check to decided whether
* we should use altmap or RAM based backing memory allocation. Also
* the address need to be aligned for set_pte operation.
* If the start addr is already PMD_SIZE aligned we will try to use
* a pmd mapping. We don't want to be too aggressive here beacause
* that will cause more allocations in RAM. So only if the namespace
* vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
*/
start = ALIGN_DOWN(start, PAGE_SIZE);
for (addr = start; addr < end; addr = next) {
next = pmd_addr_end(addr, end);
@@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
* in altmap block allocation failures, in which case
* we fallback to RAM for vmemmap allocation.
*/
if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
/*
* make sure we don't create altmap mappings
* covering things outside the device.


@@ -17,7 +17,7 @@ config PPC_POWERNV
select MMU_NOTIFIER
select FORCE_SMP
select ARCH_SUPPORTS_PER_VMA_LOCK
select PPC_RADIX_BROADCAST_TLBIE
select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
default y
config OPAL_PRD


@@ -23,7 +23,7 @@ config PPC_PSERIES
select FORCE_SMP
select SWIOTLB
select ARCH_SUPPORTS_PER_VMA_LOCK
select PPC_RADIX_BROADCAST_TLBIE
select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
default y
config PARAVIRT


@@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
static bool sevsnp;
/* Platform-specific memory-acceptance call goes here */
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
sevsnp = true;
} else if (early_is_sevsnp_guest()) {
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");


@@ -645,3 +645,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
sev_verify_cbit(top_level_pgt);
}
bool early_is_sevsnp_guest(void)
{
static bool sevsnp;
if (sevsnp)
return true;
if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
return false;
sevsnp = true;
if (!snp_vmpl) {
unsigned int eax, ebx, ecx, edx;
/*
* CPUID Fn8000_001F_EAX[28] - SVSM support
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax & BIT(28)) {
struct msr m;
/* Obtain the address of the calling area to use */
boot_rdmsr(MSR_SVSM_CAA, &m);
boot_svsm_caa = (void *)m.q;
boot_svsm_caa_pa = m.q;
/*
* The real VMPL level cannot be discovered, but the
* memory acceptance routines make no use of that so
* any non-zero value suffices here.
*/
snp_vmpl = U8_MAX;
}
}
return true;
}


@@ -13,12 +13,14 @@
bool sev_snp_enabled(void);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 sev_get_status(void);
bool early_is_sevsnp_guest(void);
#else
static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 sev_get_status(void) { return 0; }
static inline bool early_is_sevsnp_guest(void) { return false; }
#endif


@@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added)
}
}
static inline int is_x86_event(struct perf_event *event)
int is_x86_event(struct perf_event *event)
{
int i;


@@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
arr[pebs_enable] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_PEBS_ENABLE,
.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
};
if (arr[pebs_enable].host) {


@@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
*/
intel_pmu_save_and_restart_reload(event, count);
}
} else
intel_pmu_save_and_restart(event);
} else {
/*
* For a non-precise event, it's possible the
* counters-snapshotting records a positive value for the
* overflowed event. Then the HW auto-reload mechanism
* reset the counter to 0 immediately, because the
* pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD
* is not set. The counter backwards may be observed in a
* PMI handler.
*
* Since the event value has been updated when processing the
* counters-snapshotting record, only needs to set the new
* period for the counter.
*/
if (is_pebs_counter_event_group(event))
static_call(x86_pmu_set_period)(event);
else
intel_pmu_save_and_restart(event);
}
}
static __always_inline void


@@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event)
return is_metric_event(event) || is_slots_event(event);
}
int is_x86_event(struct perf_event *event);
static inline bool check_leader_group(struct perf_event *leader, int flags)
{
return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
}
static inline bool is_branch_counters_group(struct perf_event *event)
{
return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
}
static inline bool is_pebs_counter_event_group(struct perf_event *event)
{
return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
}
struct amd_nb {


@@ -163,11 +163,10 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
if (ret)
goto unlock;
}
if (!scomp_scratch_users) {
if (!scomp_scratch_users++) {
ret = crypto_scomp_alloc_scratches();
if (ret)
goto unlock;
scomp_scratch_users++;
scomp_scratch_users--;
}
unlock:
mutex_unlock(&scomp_lock);


@@ -544,7 +544,7 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_entry_vpu_ts);
ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
boot_params->system_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n",
ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
boot_params->power_profile);
}
@@ -646,7 +646,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
if (IVPU_WA(disable_d0i2))
boot_params->power_profile = 1;
boot_params->power_profile |= BIT(1);
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */


@@ -14,7 +14,7 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
#define DCT_DEFAULT_ACTIVE_PERCENT 15u
#define DCT_DEFAULT_ACTIVE_PERCENT 30u
#define DCT_PERIOD_US 35300u
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);


@@ -428,16 +428,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
active_us = (DCT_PERIOD_US * active_percent) / 100;
inactive_us = DCT_PERIOD_US - active_us;
vdev->pm->dct_active_percent = active_percent;
ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
active_percent, active_us, inactive_us);
ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
return ret;
}
vdev->pm->dct_active_percent = active_percent;
ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
active_percent, active_us, inactive_us);
return 0;
}
@@ -445,15 +446,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
{
int ret;
vdev->pm->dct_active_percent = 0;
ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
ret = ivpu_jsm_dct_disable(vdev);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
return ret;
}
vdev->pm->dct_active_percent = 0;
ivpu_dbg(vdev, PM, "DCT disabled\n");
return 0;
}
@@ -466,7 +468,7 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
return;
if (vdev->pm->dct_active_percent)
if (enable)
ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
else
ret = ivpu_pm_dct_disable(vdev);


@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
struct kobject *mkobj;
/* Lookup built-in module entry in /sys/modules */
mkobj = kset_find_obj(module_kset, drv->mod_name);
if (mkobj) {
mk = container_of(mkobj, struct module_kobject, kobj);
/* Lookup or create built-in module entry in /sys/modules */
mk = lookup_or_create_module_kobject(drv->mod_name);
if (mk) {
/* remember our module structure */
drv->p->mkobj = mk;
/* kset_find_obj took a reference */
kobject_put(mkobj);
/* lookup_or_create_module_kobject took a reference */
kobject_put(&mk->kobj);
}
}


@@ -201,15 +201,10 @@ struct ublk_params_header {
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
struct ublk_queue *ubq, int tag, size_t offset);
const struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag);
static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
{
return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
}
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
{
return ub->dev_info.flags & UBLK_F_ZONED;
@@ -609,14 +604,19 @@ static void ublk_apply_params(struct ublk_device *ub)
ublk_dev_param_zoned_apply(ub);
}
static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
return ubq->flags & UBLK_F_USER_COPY;
}
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
return !ublk_support_user_copy(ubq);
return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq);
}
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -624,8 +624,11 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
/*
* read()/write() is involved in user copy, so request reference
* has to be grabbed
*
* for zero copy, request buffer need to be registered to io_uring
* buffer table, so reference is needed
*/
return ublk_support_user_copy(ubq);
return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq);
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
@@ -1946,13 +1949,20 @@ static void ublk_io_release(void *priv)
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
struct ublk_queue *ubq, unsigned int tag,
const struct ublk_queue *ubq, unsigned int tag,
unsigned int index, unsigned int issue_flags)
{
struct ublk_device *ub = cmd->file->private_data;
const struct ublk_io *io = &ubq->ios[tag];
struct request *req;
int ret;
if (!ublk_support_zero_copy(ubq))
return -EINVAL;
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
return -EINVAL;
req = __ublk_check_and_get_req(ub, ubq, tag, 0);
if (!req)
return -EINVAL;
@@ -1968,8 +1978,17 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
}
static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
const struct ublk_queue *ubq, unsigned int tag,
unsigned int index, unsigned int issue_flags)
{
const struct ublk_io *io = &ubq->ios[tag];
if (!ublk_support_zero_copy(ubq))
return -EINVAL;
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
return -EINVAL;
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
@@ -2073,7 +2092,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_IO_REGISTER_IO_BUF:
return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
case UBLK_IO_UNREGISTER_IO_BUF:
return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
case UBLK_IO_FETCH_REQ:
ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
if (ret)
@@ -2125,13 +2144,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
struct ublk_queue *ubq, int tag, size_t offset)
const struct ublk_queue *ubq, int tag, size_t offset)
{
struct request *req;
if (!ublk_need_req_ref(ubq))
return NULL;
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
if (!req)
return NULL;
@@ -2245,6 +2261,9 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
if (!ubq)
return ERR_PTR(-EINVAL);
if (!ublk_support_user_copy(ubq))
return ERR_PTR(-EACCES);
if (tag >= ubq->q_depth)
return ERR_PTR(-EINVAL);
@@ -2783,13 +2802,18 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
UBLK_F_URING_CMD_COMP_IN_TASK;
/* GET_DATA isn't needed any more with USER_COPY */
if (ublk_dev_is_user_copy(ub))
/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
/* Zoned storage support requires user copy feature */
/*
* Zoned storage support requires reuse `ublksrv_io_cmd->addr` for
* returning write_append_lba, which is only allowed in case of
* user copy or zero copy
*/
if (ublk_dev_is_zoned(ub) &&
(!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
(!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags &
(UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) {
ret = -EINVAL;
goto out_free_dev_number;
}


@@ -957,8 +957,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
/* This is a debug event that comes from IML and OP image when it
* starts execution. There is no need pass this event to stack.
*/
if (skb->data[2] == 0x97)
if (skb->data[2] == 0x97) {
hci_recv_diag(hdev, skb);
return 0;
}
}
return hci_recv_frame(hdev, skb);
@@ -974,7 +976,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type;
struct sk_buff *new_skb;
void *pdata;
struct hci_dev *hdev = data->hdev;
@@ -1051,24 +1052,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
if (!new_skb) {
bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
skb->len);
ret = -ENOMEM;
goto exit_error;
}
hci_skb_pkt_type(new_skb) = pkt_type;
skb_put_data(new_skb, skb->data, plen);
hci_skb_pkt_type(skb) = pkt_type;
hdev->stat.byte_rx += plen;
skb_trim(skb, plen);
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
ret = btintel_pcie_recv_event(hdev, new_skb);
ret = btintel_pcie_recv_event(hdev, skb);
else
ret = hci_recv_frame(hdev, new_skb);
ret = hci_recv_frame(hdev, skb);
skb = NULL; /* skb is freed in the callee */
exit_error:
if (skb)
kfree_skb(skb);
if (ret)
hdev->stat.err_rx++;
@@ -1202,8 +1199,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data *data = container_of(work,
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
int err;
struct hci_dev *hdev = data->hdev;
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
@@ -1224,11 +1219,7 @@ static void btintel_pcie_rx_work(struct work_struct *work)
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
err = btintel_pcie_recv_frame(data, skb);
if (err)
bt_dev_err(hdev, "Failed to send received frame: %d",
err);
kfree_skb(skb);
btintel_pcie_recv_frame(data, skb);
}
}
@@ -1281,10 +1272,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
/* Check CR_TIA and CR_HIA for change */
if (cr_tia == cr_hia) {
bt_dev_warn(hdev, "RXQ: no new CD found");
if (cr_tia == cr_hia)
return;
}
rxq = &data->rxq;
@@ -1320,6 +1309,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
{
return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
}
static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
{
return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
}
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
@@ -1351,12 +1350,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_gp0_handler(data);
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data);
if (!btintel_pcie_is_rxq_empty(data))
btintel_pcie_msix_rx_handle(data);
}
/* For RX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data);
if (!btintel_pcie_is_txackq_empty(data))
btintel_pcie_msix_tx_handle(data);
}
/*
* Before sending the interrupt the HW disables it to prevent a nested


@@ -723,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
/* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
return 0;
sdio_claim_host(bdev->func);
/* Disable interrupt */
@@ -1443,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func)
if (!bdev)
return;
hdev = bdev->hdev;
/* Make sure to call btmtksdio_close before removing sdio card */
if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
btmtksdio_close(hdev);
/* Be consistent the state in btmtksdio_probe */
pm_runtime_get_noresume(bdev->dev);
hdev = bdev->hdev;
sdio_set_drvdata(func, NULL);
hci_unregister_dev(hdev);
hci_free_dev(hdev);


@@ -3010,22 +3010,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
/*
* ==0: not a dump pkt.
* < 0: fails to handle a dump pkt
* > 0: otherwise.
*/
/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
int ret = 1;
int ret = 0;
u8 pkt_type;
u8 *sk_ptr;
unsigned int sk_len;
u16 seqno;
u32 dump_size;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
@@ -3035,30 +3029,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
sk_len = skb->len;
if (pkt_type == HCI_ACLDATA_PKT) {
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
return 0;
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
} else {
event_hdr = hci_event_hdr(skb);
}
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return 0;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data))
|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
|| (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return 0;
/*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3132,17 +3110,84 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}
/* Return: true if the ACL packet is a dump packet, false otherwise. */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
sk_ptr = skb->data;
sk_len = skb->len;
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
return false;
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
if ((event_hdr->evt != HCI_VENDOR_PKT) ||
(event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
return true;
}
/* Return: true if the event packet is a dump packet, false otherwise. */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct qca_dump_hdr *dump_hdr;
sk_ptr = skb->data;
sk_len = skb->len;
event_hdr = hci_event_hdr(skb);
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
return true;
}
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
if (handle_dump_pkt_qca(hdev, skb))
return 0;
if (acl_pkt_is_dump_qca(hdev, skb))
return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
if (handle_dump_pkt_qca(hdev, skb))
return 0;
if (evt_pkt_is_dump_qca(hdev, skb))
return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}


@@ -909,8 +909,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
if (acpi_cpufreq_driver.set_boost)
policy->boost_supported = true;
if (acpi_cpufreq_driver.set_boost) {
if (policy->boost_supported) {
/*
* The firmware may have altered boost state while the
* CPU was offline (for example during a suspend-resume
* cycle).
*/
if (policy->boost_enabled != boost_state(cpu))
set_boost(policy, policy->boost_enabled);
} else {
policy->boost_supported = true;
}
}
return result;


@@ -536,14 +536,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
unsigned int target_freq,
unsigned int min, unsigned int max,
unsigned int relation)
{
unsigned int idx;
target_freq = clamp_val(target_freq, min, max);
if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -577,8 +581,7 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
if (unlikely(min > max))
min = max;
return __resolve_freq(policy, clamp_val(target_freq, min, max),
CPUFREQ_RELATION_LE);
return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2397,8 +2400,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
target_freq = clamp_val(target_freq, policy->min, policy->max);
target_freq = __resolve_freq(policy, target_freq, relation);
target_freq = __resolve_freq(policy, target_freq, policy->min,
policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2727,8 +2730,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* compiler optimizations around them because they may be accessed
* concurrently by cpufreq_driver_resolve_freq() during the update.
*/
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
new_data.min, new_data.max,
CPUFREQ_RELATION_H));
new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
new_data.max, CPUFREQ_RELATION_L);
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
trace_cpu_frequency_limits(policy);


@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
index = cpufreq_frequency_table_target(policy, freq_next, relation);
index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;


@@ -115,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
unsigned int target_freq, unsigned int min,
unsigned int max, unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -147,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((freq < policy->min) || (freq > policy->max))
if (freq < min || freq > max)
continue;
if (freq == target_freq) {
optimal.driver_data = i;


@@ -598,6 +598,9 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
if (!cpu_feature_enabled(X86_FEATURE_IDA))
return true;
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);


@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
if (status & priv->ecc_stat_ce_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
&err_addr);
if (priv->ecc_uecnt_offset)
if (priv->ecc_cecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
&err_count);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
}
}
/* Interrupt mode set to every SBERR */
regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
ALTR_A10_ECC_INTMODE);
/* Enable ECC */
ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
ALTR_A10_ECC_CTRL_OFST));
@@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
return PTR_ERR(edac->ecc_mgr_map);
}
/* Set irq mask for DDR SBE to avoid any pending irq before registration */
regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
(A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;


@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16)
#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17)
#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0


@@ -188,7 +188,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
select STACKDEPOT
depends on DRM_KMS_HELPER
select DRM_KMS_HELPER
depends on DEBUG_KERNEL
depends on EXPERT
help


@@ -121,7 +121,6 @@ struct adp_drv_private {
dma_addr_t mask_iova;
int be_irq;
int fe_irq;
spinlock_t irq_lock;
struct drm_pending_vblank_event *event;
};
@@ -288,6 +287,7 @@ static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
writel(BIT(0), adp->be + ADBE_BLEND_EN3);
writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
writel(BIT(0), adp->be + ADBE_BLEND_EN4);
drm_crtc_vblank_on(crtc);
}
static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -310,6 +310,7 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
u32 frame_num = 1;
unsigned long flags;
struct adp_drv_private *adp = crtc_to_adp(crtc);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
u64 new_size = ALIGN(new_state->mode.hdisplay *
@@ -330,13 +331,19 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
}
writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
//FIXME: use adbe flush interrupt
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_vblank_get(crtc);
adp->event = crtc->state->event;
struct drm_pending_vblank_event *event = crtc->state->event;
crtc->state->event = NULL;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, event);
else
adp->event = event;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
crtc->state->event = NULL;
spin_unlock_irq(&crtc->dev->event_lock);
}
static const struct drm_crtc_funcs adp_crtc_funcs = {
@@ -482,8 +489,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg)
u32 int_status;
u32 int_ctl;
spin_lock(&adp->irq_lock);
int_status = readl(adp->fe + ADP_INT_STATUS);
if (int_status & ADP_INT_STATUS_VBLANK) {
drm_crtc_handle_vblank(&adp->crtc);
@@ -501,7 +506,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg)
writel(int_status, adp->fe + ADP_INT_STATUS);
spin_unlock(&adp->irq_lock);
return IRQ_HANDLED;
}
@@ -512,8 +516,7 @@ static int adp_drm_bind(struct device *dev)
struct adp_drv_private *adp = to_adp(drm);
int err;
adp_disable_vblank(adp);
writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL);
adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
if (IS_ERR(adp->next_bridge)) {
@@ -567,8 +570,6 @@ static int adp_probe(struct platform_device *pdev)
if (IS_ERR(adp))
return PTR_ERR(adp);
spin_lock_init(&adp->irq_lock);
dev_set_drvdata(&pdev->dev, &adp->drm);
err = adp_parse_of(pdev, adp);


@@ -199,6 +199,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
/* XGMI-accessible memory should never be DMA-mapped */
if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
dma_buf_attach_adev(attach), bo)))
return ERR_PTR(-EINVAL);
r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
bo->tbo.base.size, attach->dev,
dir, &sgt);


@@ -360,7 +360,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
#define MMIO_REG_HOLE_OFFSET 0x44000
static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
{

View File

@@ -502,6 +502,52 @@ static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}
/**
* vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode
*
* @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state)
{
struct amdgpu_device *adev = vinst->adev;
uint32_t reg_data = 0;
int vcn_inst;
vcn_inst = GET_INST(VCN, vinst->inst);
/* pause/unpause if state is changed */
if (vinst->pause_state.fw_based != new_state->fw_based) {
DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
vinst->pause_state.fw_based, new_state->fw_based,
new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
/* wait for ACK */
SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
} else {
/* unpause DPG, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
}
vinst->pause_state.fw_based = new_state->fw_based;
}
return 0;
}
/**
* vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
*
@@ -518,6 +564,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
volatile struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
int vcn_inst;
uint32_t tmp;
@@ -582,6 +629,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
/* Pause dpg */
vcn_v5_0_1_pause_dpg_mode(vinst, &state);
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
@@ -775,9 +825,13 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
vcn_inst = GET_INST(VCN, inst_idx);
/* Unpause dpg */
vcn_v5_0_1_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);


@@ -173,6 +173,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int conn_index = aconnector->base.index;
guard(mutex)(&hdcp_w->mutex);
drm_connector_get(&aconnector->base);
if (hdcp_w->aconnector[conn_index])
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -220,7 +223,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
unsigned int conn_index = aconnector->base.index;
guard(mutex)(&hdcp_w->mutex);
hdcp_w->aconnector[conn_index] = aconnector;
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -236,7 +238,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
if (hdcp_w->aconnector[conn_index]) {
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = NULL;
}
process_output(hdcp_w);
}
@@ -254,6 +259,10 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
hdcp_w->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
if (hdcp_w->aconnector[conn_index]) {
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = NULL;
}
}
process_output(hdcp_w);
@@ -488,6 +497,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_work = handle;
struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
int link_index = aconnector->dc_link->link_index;
unsigned int conn_index = aconnector->base.index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -544,7 +554,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
drm_connector_get(&aconnector->base);
if (hdcp_w->aconnector[conn_index])
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
process_output(hdcp_w);
}


@@ -964,6 +964,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
if (!drm_dev_enter(dev, &idx))
return;
drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
@@ -983,6 +987,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
drm_dev_exit(idx);
}
EXPORT_SYMBOL(drm_show_fdinfo);


@@ -1469,9 +1469,9 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
}
i += 1 << order;
num_dma_mapped = i;
range->flags.has_dma_mapping = true;
}
range->flags.has_dma_mapping = true;
if (zdd) {
range->flags.has_devmem_pages = true;
range->dpagemap = dpagemap;

View File

@@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
struct mipi_dbi *dbi = &dbidev->dbi;
size_t len = width * height * 2;
const struct drm_format_info *dst_format;
size_t len;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
dst_format = drm_format_info(dbidev->pixel_format);
len = drm_format_info_min_pitch(dst_format, 0, width) * height;
memset(dbidev->tx_buf, 0, len);
mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);


@@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);
int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
#else
static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
@@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
return 0;
}
static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
{
return false;
}
#endif
bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
#endif /*__INTEL_PXP_GSCCS_H__ */


@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
if (error)
if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
if (nouveau_fence_signal(fence))

View File

@@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_EXPECT_NULL(test, shmem->sgt);
ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
KUNIT_ASSERT_EQ(test, ret, 0);

View File

@@ -1093,7 +1093,8 @@ struct ttm_bo_swapout_walk {
struct ttm_lru_walk walk;
/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
gfp_t gfp_flags;
/** @hit_low: Whether we should attempt to swap BO's with low watermark threshold */
/** @evict_low: If we cannot swap a bo when @try_low is false (first pass) */
bool hit_low, evict_low;
};

View File

@@ -52,6 +52,8 @@ struct xe_eu_stall_data_stream {
struct xe_gt *gt;
struct xe_bo *bo;
/* Lock to protect data buffer pointers */
struct mutex xecore_buf_lock;
struct per_xecore_buf *xecore_buf;
struct {
bool reported_to_user;
@@ -208,6 +210,9 @@ int xe_eu_stall_init(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
int ret;
if (!xe_eu_stall_supported_on_platform(xe))
return 0;
gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL);
if (!gt->eu_stall) {
ret = -ENOMEM;
@@ -378,7 +383,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
u16 group, instance;
unsigned int xecore;
mutex_lock(&gt->eu_stall->stream_lock);
mutex_lock(&stream->xecore_buf_lock);
for_each_dss_steering(xecore, gt, group, instance) {
xecore_buf = &stream->xecore_buf[xecore];
read_ptr = xecore_buf->read;
@@ -396,7 +401,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
set_bit(xecore, stream->data_drop.mask);
xecore_buf->write = write_ptr;
}
mutex_unlock(&gt->eu_stall->stream_lock);
mutex_unlock(&stream->xecore_buf_lock);
return min_data_present;
}
@@ -511,11 +516,13 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
unsigned int xecore;
int ret = 0;
mutex_lock(&stream->xecore_buf_lock);
if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) {
if (!stream->data_drop.reported_to_user) {
stream->data_drop.reported_to_user = true;
xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n",
XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask);
mutex_unlock(&stream->xecore_buf_lock);
return -EIO;
}
stream->data_drop.reported_to_user = false;
@@ -527,6 +534,7 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
if (ret || count == total_size)
break;
}
mutex_unlock(&stream->xecore_buf_lock);
return total_size ?: (ret ?: -EAGAIN);
}
@@ -583,6 +591,7 @@ static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream)
{
struct xe_gt *gt = stream->gt;
mutex_destroy(&stream->xecore_buf_lock);
gt->eu_stall->stream = NULL;
kfree(stream);
}
@@ -718,6 +727,7 @@ static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream,
}
init_waitqueue_head(&stream->poll_wq);
mutex_init(&stream->xecore_buf_lock);
INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn);
stream->per_xecore_buf_size = per_xecore_buf_size;
stream->sampling_rate_mult = props->sampling_rate_mult;


@@ -7,6 +7,7 @@
#define __XE_EU_STALL_H__
#include "xe_gt_types.h"
#include "xe_sriov.h"
size_t xe_eu_stall_get_per_xecore_buf_size(void);
size_t xe_eu_stall_data_record_size(struct xe_device *xe);
@@ -19,6 +20,6 @@ int xe_eu_stall_stream_open(struct drm_device *dev,
static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe)
{
return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20;
return !IS_SRIOV_VF(xe) && (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20);
}
#endif


@@ -359,7 +359,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
ext->reg = XE_REG(extlist->reg.__reg.addr);
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
ext->regname = extlist->name;
}


@@ -79,7 +79,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (!range)
return ERR_PTR(-ENOMEM);
return NULL;
INIT_LIST_HEAD(&range->garbage_collector_link);
xe_vm_get(gpusvm_to_vm(gpusvm));


@@ -307,7 +307,7 @@ void __init hv_get_partition_id(void)
local_irq_save(flags);
output = *this_cpu_ptr(hyperv_pcpu_input_arg);
status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, &output);
status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
pt_id = output->partition_id;
local_irq_restore(flags);
@@ -566,9 +566,11 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
kfree(*synic_eventring_tail);
*synic_eventring_tail = NULL;
if (hv_root_partition()) {
synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
kfree(*synic_eventring_tail);
*synic_eventring_tail = NULL;
}
return 0;
}


@@ -1380,9 +1380,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
return 0;
rpm_disable:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}


@@ -3664,6 +3664,14 @@ static int __init parse_ivrs_acpihid(char *str)
while (*uid == '0' && *(uid + 1))
uid++;
if (strlen(hid) >= ACPIHID_HID_LEN) {
pr_err("Invalid command line: hid is too long\n");
return 1;
} else if (strlen(uid) >= ACPIHID_UID_LEN) {
pr_err("Invalid command line: uid is too long\n");
return 1;
}
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));


@@ -411,6 +411,12 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
return ERR_CAST(smmu_domain);
smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
/*
* Choose page_size as the leaf page size for invalidation when
* ARM_SMMU_FEAT_RANGE_INV is present
*/
smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
smmu_domain->smmu = smmu;
ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,


@@ -3388,6 +3388,7 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
mutex_lock(&smmu->streams_mutex);
for (i = 0; i < fwspec->num_ids; i++) {
struct arm_smmu_stream *new_stream = &master->streams[i];
struct rb_node *existing;
u32 sid = fwspec->ids[i];
new_stream->id = sid;
@@ -3398,11 +3399,21 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
break;
/* Insert into SID tree */
if (rb_find_add(&new_stream->node, &smmu->streams,
arm_smmu_streams_cmp_node)) {
dev_warn(master->dev, "stream %u already in tree\n",
sid);
ret = -EINVAL;
existing = rb_find_add(&new_stream->node, &smmu->streams,
arm_smmu_streams_cmp_node);
if (existing) {
struct arm_smmu_master *existing_master =
rb_entry(existing, struct arm_smmu_stream, node)
->master;
/* Bridged PCI devices may end up with duplicated IDs */
if (existing_master == master)
continue;
dev_warn(master->dev,
"Aliasing StreamID 0x%x (from %s) unsupported, expect DMA to be broken\n",
sid, dev_name(existing_master->dev));
ret = -ENODEV;
break;
}
}
@@ -4429,6 +4440,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
if (FIELD_GET(IDR3_RIL, reg))
smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
if (FIELD_GET(IDR3_FWB, reg))
smmu->features |= ARM_SMMU_FEAT_S2FWB;
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);


@@ -3785,20 +3785,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
intel_iommu_debugfs_create_dev(info);
/*
* The PCIe spec, in its wisdom, declares that the behaviour of the
* device is undefined if you enable PASID support after ATS support.
* So always enable PASID support on devices which have it, even if
* we can't yet know if we're ever going to use it.
*/
if (info->pasid_supported &&
!pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
if (sm_supported(iommu))
iommu_enable_pci_ats(info);
iommu_enable_pci_pri(info);
return &iommu->iommu;
free_table:
intel_pasid_free_table(dev);
@@ -3810,6 +3796,26 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
return ERR_PTR(ret);
}
static void intel_iommu_probe_finalize(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
/*
* The PCIe spec, in its wisdom, declares that the behaviour of the
* device is undefined if you enable PASID support after ATS support.
* So always enable PASID support on devices which have it, even if
* we can't yet know if we're ever going to use it.
*/
if (info->pasid_supported &&
!pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1))
info->pasid_enabled = 1;
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev))
iommu_enable_pci_ats(info);
iommu_enable_pci_pri(info);
}
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4391,6 +4397,7 @@ const struct iommu_ops intel_iommu_ops = {
.domain_alloc_sva = intel_svm_domain_alloc,
.domain_alloc_nested = intel_iommu_domain_alloc_nested,
.probe_device = intel_iommu_probe_device,
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
@@ -4432,6 +4439,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
/* QM57/QS57 integrated gfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
@@ -4509,7 +4519,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);


@@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
if (pin == GPIO_NO_WAKE_IRQ)
return irq_domain_disconnect_hierarchy(domain, virq);
ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
&qcom_mpm_chip, priv);
if (ret)


@@ -68,6 +68,8 @@
#define LIST_DIRTY 1
#define LIST_SIZE 2
#define SCAN_RESCHED_CYCLE 16
/*--------------------------------------------------------------*/
/*
@@ -2424,7 +2426,12 @@ static void __scan(struct dm_bufio_client *c)
atomic_long_dec(&c->need_shrink);
freed++;
cond_resched();
if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
dm_bufio_unlock(c);
cond_resched();
dm_bufio_lock(c);
}
}
}
}


@@ -5164,7 +5164,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
BUG_ON(!list_empty(&ic->wait_list));
if (ic->mode == 'B')
if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
cancel_delayed_work_sync(&ic->bitmap_flush_work);
if (ic->metadata_wq)
destroy_workqueue(ic->metadata_wq);


@@ -523,9 +523,10 @@ static char **realloc_argv(unsigned int *size, char **old_argv)
gfp = GFP_NOIO;
}
argv = kmalloc_array(new_size, sizeof(*argv), gfp);
if (argv && old_argv) {
memcpy(argv, old_argv, *size * sizeof(*argv));
if (argv) {
*size = new_size;
if (old_argv)
memcpy(argv, old_argv, *size * sizeof(*argv));
}
kfree(old_argv);
@@ -1049,7 +1050,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
unsigned int min_pool_size = 0, pool_size;
struct dm_md_mempools *pools;
unsigned int bioset_flags = 0;
bool mempool_needs_integrity = t->integrity_supported;
if (unlikely(type == DM_TYPE_NONE)) {
DMERR("no table type is set, can't allocate mempools");
@@ -1074,8 +1074,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
min_pool_size = max(min_pool_size, ti->num_flush_bios);
mempool_needs_integrity |= ti->mempool_needs_integrity;
}
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size,

View File

@@ -691,8 +691,8 @@ config MMC_TMIO_CORE
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
depends on (RESET_CONTROLLER && REGULATOR) || !OF
select MMC_TMIO_CORE
select RESET_CONTROLLER if ARCH_RENESAS
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs

View File

@@ -1179,7 +1179,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(rdev)) {
dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev));
ret = PTR_ERR(rdev);
goto efree;
goto edisclk;
}
priv->rdev = rdev;
}
@@ -1243,26 +1243,26 @@ int renesas_sdhi_probe(struct platform_device *pdev,
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0) {
ret = num_irqs;
goto eirq;
goto edisclk;
}
/* There must be at least one IRQ source */
if (!num_irqs) {
ret = -ENXIO;
goto eirq;
goto edisclk;
}
for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
goto eirq;
goto edisclk;
}
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
goto edisclk;
}
ret = tmio_mmc_host_probe(host);
@@ -1274,8 +1274,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
return ret;
eirq:
tmio_mmc_host_remove(host);
edisclk:
renesas_sdhi_clk_disable(host);
efree:

View File

@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
int port;
int i, port;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
@@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
QSYS_PARAM_CFG_REG_3);
for (i = 0; i < taprio->num_entries; i++)
vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);

View File

@@ -186,7 +186,6 @@ void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
pds_client_unregister(pf, padev->client_id);
auxiliary_device_delete(&padev->aux_dev);
auxiliary_device_uninit(&padev->aux_dev);
padev->client_id = 0;
*pd_ptr = NULL;
mutex_unlock(&pf->config_lock);

View File

@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
XGBE_SKB_ALLOC_SIZE);
if (pdata->netdev->features & NETIF_F_RXCSUM) {
xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
XGBE_SKB_ALLOC_SIZE);
} else {
xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
pdata->rx_buf_size);
}
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,

View File

@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
{
unsigned int i;
for (i = 0; i < pdata->channel_count; i++) {
if (!pdata->channel[i]->rx_ring)
break;
XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
}
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
unsigned int index, unsigned int val)
{
@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
xgbe_config_sph_mode(pdata);
xgbe_config_rss(pdata);
if (pdata->netdev->features & NETIF_F_RXCSUM) {
xgbe_config_sph_mode(pdata);
xgbe_config_rss(pdata);
}
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
/* For Split Header*/
hw_if->enable_sph = xgbe_config_sph_mode;
hw_if->disable_sph = xgbe_disable_sph_mode;
DBGPR("<--xgbe_init_function_ptrs\n");
}

View File

@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
if ((features & NETIF_F_RXCSUM) && !rxcsum)
if ((features & NETIF_F_RXCSUM) && !rxcsum) {
hw_if->enable_sph(pdata);
hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
else if (!(features & NETIF_F_RXCSUM) && rxcsum)
schedule_work(&pdata->restart_work);
} else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
hw_if->disable_sph(pdata);
hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
schedule_work(&pdata->restart_work);
}
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);

View File

@@ -865,6 +865,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
/* For Split Header */
void (*enable_sph)(struct xgbe_prv_data *pdata);
void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an
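This addition extends the xgbe hardware ops table with enable/disable hooks for split header mode, so generic driver code can toggle the feature through function pointers rather than calling hardware-specific routines directly. A small, self-contained sketch of that ops-table pattern follows; the struct and function names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

struct demo_priv {
	bool sph_enabled;
};

/* Feature toggles exposed as function pointers, in the style of xgbe_hw_if. */
struct demo_hw_ops {
	void (*enable_sph)(struct demo_priv *priv);
	void (*disable_sph)(struct demo_priv *priv);
};

static void demo_enable_sph(struct demo_priv *priv)
{
	priv->sph_enabled = true;
	puts("split header enabled");
}

static void demo_disable_sph(struct demo_priv *priv)
{
	priv->sph_enabled = false;
	puts("split header disabled");
}

int main(void)
{
	struct demo_hw_ops ops = {
		.enable_sph  = demo_enable_sph,
		.disable_sph = demo_disable_sph,
	};
	struct demo_priv priv = { .sph_enabled = false };
	bool rxcsum = true;

	/* Mirrors the RXCSUM toggle in the driver: pick the op at runtime. */
	if (rxcsum)
		ops.enable_sph(&priv);
	else
		ops.disable_sph(&priv);
	return 0;
}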

View File

@@ -2015,6 +2015,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
}
return skb;
vlan_err:
skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
@@ -3414,6 +3415,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
bnxt_free_one_tx_ring_skbs(bp, txr, i);
}
if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
}
static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
@@ -11599,6 +11603,9 @@ static void bnxt_init_napi(struct bnxt *bp)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
@@ -12318,12 +12325,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
struct hwrm_func_drv_if_change_input *req;
bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
bool caps_change = false;
int rc, retry = 0;
bool fw_reset;
u32 flags = 0;
fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
bp->fw_reset_state = 0;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -12392,13 +12402,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
/* IRQ will be initialized later in bnxt_request_irq()*/
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
if (rc) {
clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
netdev_err(bp->dev, "init int mode failed\n");
return rc;
}
}
rc = bnxt_cancel_reservations(bp, fw_reset);
}
@@ -12797,8 +12802,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -14833,7 +14836,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
bp->fw_reset_state = 0;
bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
netif_close(bp->dev);
}
@@ -16003,8 +16006,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_rdma_aux_device_del(bp);
bnxt_ptp_clear(bp);
unregister_netdev(dev);
bnxt_ptp_clear(bp);
bnxt_rdma_aux_device_uninit(bp);
@@ -16931,10 +16934,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
/* IRQ will be initialized later in bnxt_io_resume */
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
err = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, err);
}
reset_exit:
@@ -16963,10 +16965,13 @@ static void bnxt_io_resume(struct pci_dev *pdev)
err = bnxt_hwrm_func_qcaps(bp);
if (!err) {
if (netif_running(netdev))
if (netif_running(netdev)) {
err = bnxt_open(netdev);
else
} else {
err = bnxt_reserve_rings(bp, true);
if (!err)
err = bnxt_init_int_mode(bp);
}
}
if (!err)

View File

@@ -2614,6 +2614,7 @@ struct bnxt {
#define BNXT_FW_RESET_STATE_POLL_FW 4
#define BNXT_FW_RESET_STATE_OPENING 5
#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6
#define BNXT_FW_RESET_STATE_ABORT 7
u16 fw_reset_min_dsecs;
#define BNXT_DFLT_FW_RST_MIN_DSECS 20

View File

@@ -110,20 +110,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
}
}
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
memcpy(info->dest_buf + off, dma_buf, len);
} else {
rc = -ENOBUFS;
break;
}
}
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
info->dest_buf_size += len;
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
u16 copylen = min_t(u16, len,
info->dest_buf_size - off);
memcpy(info->dest_buf + off, dma_buf, copylen);
if (copylen < len)
break;
} else {
rc = -ENOBUFS;
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
kfree(info->dest_buf);
info->dest_buf = NULL;
}
break;
}
}
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
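The reworked copy above clamps each retrieved chunk with min_t() so it can never run past the destination buffer, and drops the buffer if a coredump list segment would overflow it. A tiny userspace sketch of the clamped-copy idea, assuming the caller supplies a fixed-size destination and an offset; the helper name and sizes are made up.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Clamp a chunk copy so it can never write past the end of dst, in the
 * spirit of the min_t()-bounded memcpy in the hunk above.  Returns the
 * number of bytes actually copied, which may be less than len. */
static size_t copy_bounded(uint8_t *dst, size_t dst_size, size_t off,
			   const uint8_t *src, size_t len)
{
	size_t copylen;

	if (off >= dst_size)
		return 0;
	copylen = len < dst_size - off ? len : dst_size - off;
	memcpy(dst + off, src, copylen);
	return copylen;
}

int main(void)
{
	uint8_t dst[8] = { 0 };
	const uint8_t src[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

	/* Only 4 bytes fit at offset 4; the remaining 12 are dropped. */
	size_t n = copy_bounded(dst, sizeof(dst), 4, src, sizeof(src));
	printf("copied %zu bytes\n", n);
	return 0;
}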

View File

@@ -2062,6 +2062,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
return reg_len;
}
#define BNXT_PCIE_32B_ENTRY(start, end) \
{ offsetof(struct pcie_ctx_hw_stats, start), \
offsetof(struct pcie_ctx_hw_stats, end) }
static const struct {
u16 start;
u16 end;
} bnxt_pcie_32b_entries[] = {
BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
};
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
@@ -2094,12 +2105,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
rc = hwrm_req_send(bp, req);
if (!rc) {
__le64 *src = (__le64 *)hw_pcie_stats;
u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
int i;
u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
u8 *src = (u8 *)hw_pcie_stats;
int i, j;
for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
dst[i] = le64_to_cpu(src[i]);
for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
if (i >= bnxt_pcie_32b_entries[j].start &&
i <= bnxt_pcie_32b_entries[j].end) {
u32 *dst32 = (u32 *)(dst + i);
*dst32 = le32_to_cpu(*(__le32 *)(src + i));
i += 4;
if (i > bnxt_pcie_32b_entries[j].end &&
j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
j++;
} else {
u64 *dst64 = (u64 *)(dst + i);
*dst64 = le64_to_cpu(*(__le64 *)(src + i));
i += 8;
}
}
}
hwrm_req_drop(bp, req);
}
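The new loop above walks the PCIe stats blob byte by byte and converts entries listed in bnxt_pcie_32b_entries as 32-bit little-endian words while treating everything else as 64-bit values. Below is a standalone sketch of that mixed-width conversion, assuming a glibc-style <endian.h> for le32toh()/le64toh(); the range offsets and blob layout are invented for the demo.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Byte ranges of the source blob holding 32-bit little-endian counters
 * (start and end are inclusive byte offsets); everything outside them is
 * a 64-bit little-endian value.  Offsets here are made up. */
static const struct { size_t start, end; } le32_ranges[] = {
	{ 16, 31 },	/* e.g. a histogram of four u32 entries */
};

static int in_le32_range(size_t off)
{
	for (size_t j = 0; j < sizeof(le32_ranges) / sizeof(le32_ranges[0]); j++)
		if (off >= le32_ranges[j].start && off <= le32_ranges[j].end)
			return 1;
	return 0;
}

/* Convert a little-endian blob to host byte order, honouring the mixed
 * 32-bit / 64-bit layout.  Assumes size is a whole number of fields. */
static void convert_blob(uint8_t *dst, const uint8_t *src, size_t size)
{
	size_t i = 0;

	while (i < size) {
		if (in_le32_range(i)) {
			uint32_t v;
			memcpy(&v, src + i, sizeof(v));
			v = le32toh(v);
			memcpy(dst + i, &v, sizeof(v));
			i += 4;
		} else {
			uint64_t v;
			memcpy(&v, src + i, sizeof(v));
			v = le64toh(v);
			memcpy(dst + i, &v, sizeof(v));
			i += 8;
		}
	}
}

int main(void)
{
	uint8_t src[48] = { 0 }, dst[48] = { 0 };
	uint64_t v64;
	uint32_t v32;

	src[0] = 0x2a;		/* 64-bit LE value 42 at offset 0 */
	src[16] = 0x07;		/* 32-bit LE value 7 inside the 32-bit range */
	convert_blob(dst, src, sizeof(src));
	memcpy(&v64, dst, sizeof(v64));
	memcpy(&v32, dst + 16, sizeof(v32));
	printf("first u64: %llu, first u32 in range: %u\n",
	       (unsigned long long)v64, v32);
	return 0;
}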
@@ -4991,6 +5017,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (etest->flags & ETH_TEST_FL_OFFLINE &&
bnxt_ulp_registered(bp->edev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4998,7 +5025,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
return;

View File

@@ -794,6 +794,27 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
return HZ;
}
void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
{
struct bnxt_ptp_tx_req *txts_req;
u16 cons = ptp->txts_cons;
/* make sure ptp aux worker finished with
* possible BNXT_STATE_OPEN set
*/
ptp_cancel_worker_sync(ptp->ptp_clock);
ptp->tx_avail = BNXT_MAX_TX_TS;
while (cons != ptp->txts_prod) {
txts_req = &ptp->txts_req[cons];
if (!IS_ERR_OR_NULL(txts_req->tx_skb))
dev_kfree_skb_any(txts_req->tx_skb);
cons = NEXT_TXTS(cons);
}
ptp->txts_cons = cons;
ptp_schedule_worker(ptp->ptp_clock, 0);
}
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
{
spin_lock_bh(&ptp->ptp_tx_lock);
@@ -1105,7 +1126,6 @@ int bnxt_ptp_init(struct bnxt *bp)
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int i;
if (!ptp)
return;
@@ -1117,12 +1137,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
for (i = 0; i < BNXT_MAX_TX_TS; i++) {
if (ptp->txts_req[i].tx_skb) {
dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
ptp->txts_req[i].tx_skb = NULL;
}
}
bnxt_unmap_ptp_regs(bp);
}

View File

@@ -162,6 +162,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);

View File

@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
np->led_mode = psrom->led_mode;
np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}

View File

@@ -335,7 +335,7 @@ typedef struct t_SROM {
u16 sub_system_id; /* 0x06 */
u16 pci_base_1; /* 0x08 (IP1000A only) */
u16 pci_base_2; /* 0x0a (IP1000A only) */
u16 led_mode; /* 0x0c (IP1000A only) */
__le16 led_mode; /* 0x0c (IP1000A only) */
u16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
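Marking led_mode as __le16 makes the endianness of the SROM field explicit, so le16_to_cpu() is applied exactly once when the value is read. The userspace equivalent is to assemble the value from its two bytes by hand; a short sketch follows, using the 0x0c offset given in the layout above and otherwise illustrative values.

#include <stdint.h>
#include <stdio.h>

/* Portable read of a 16-bit little-endian field from an EEPROM image,
 * independent of host byte order: the userspace counterpart of declaring
 * the field __le16 and converting with le16_to_cpu(). */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t srom[32] = { 0 };

	/* Offset 0x0c holds led_mode in the layout shown above. */
	srom[0x0c] = 0x34;
	srom[0x0d] = 0x12;
	printf("led_mode = 0x%04x\n", get_le16(&srom[0x0c]));
	return 0;
}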

View File

@@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
writel(0, txq->bd.reg_desc_active);
if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
!readl(txq->bd.reg_desc_active) ||
!readl(txq->bd.reg_desc_active) ||
!readl(txq->bd.reg_desc_active) ||
!readl(txq->bd.reg_desc_active))
writel(0, txq->bd.reg_desc_active);
return 0;
}

View File

@@ -61,7 +61,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
.buf_len = HNS3_DBG_READ_LEN,
.buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{

View File

@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
/* enable vector */
hns3_mask_vector_irq(tqp_vector, 1);
}
static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
/* disable vector */
hns3_mask_vector_irq(tqp_vector, 0);
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u16 i;
for (i = 0; i < priv->vector_num; i++)
hns3_irq_enable(&priv->tqp_vector[i]);
for (i = 0; i < priv->vector_num; i++)
hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_enable(h->kinfo.tqp[i]);
}
static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u16 i;
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_disable(h->kinfo.tqp[i]);
for (i = 0; i < priv->vector_num; i++)
hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
for (i = 0; i < priv->vector_num; i++)
hns3_irq_disable(&priv->tqp_vector[i]);
}
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
/* enable the vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
/* enable rcb */
for (j = 0; j < h->kinfo.num_tqps; j++)
hns3_tqp_enable(h->kinfo.tqp[j]);
hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
while (j--)
hns3_tqp_disable(h->kinfo.tqp[j]);
for (j = i - 1; j >= 0; j--)
hns3_vector_disable(&priv->tqp_vector[j]);
hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
int i;
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
/* disable rcb */
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_disable(h->kinfo.tqp[i]);
hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -5864,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int i;
if (!if_running)
return;
@@ -5876,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_disable(h->kinfo.tqp[i]);
hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5896,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int i;
if (!if_running)
return;
@@ -5912,11 +5912,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_enable(h->kinfo.tqp[i]);
hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);

View File

@@ -440,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
spin_lock_init(&ptp->lock);
ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -451,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
spin_lock_init(&ptp->lock);
ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
return 0;
}

View File

@@ -1292,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1303,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
if (ret)
return ret;
hdev->rxvtag_strip_en = enable;
return 0;
}
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
@@ -2204,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
ret = hclgevf_en_hw_strip_rxvtag(nic, true);
ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2879,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
ret = hclgevf_init_vlan_config(hdev);
ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2994,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
ret = hclgevf_init_vlan_config(hdev);
ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);

View File

@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];

View File

@@ -2345,15 +2345,15 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
}
if (hw->mac_type != ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
if (hw->mac_type == ICE_MAC_E810 ||
hw->mac_type == ICE_MAC_GENERIC)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
}
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)

View File

@@ -2097,6 +2097,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err_exit;
}
#define ICE_VF_MAX_FDIR_FILTERS 128
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||

View File

@@ -629,13 +629,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
#define IDPF_CAP_RX_CSUM_L4V4 (\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
#define IDPF_CAP_TX_CSUM_L4V4 (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
#define IDPF_CAP_RX_CSUM_L4V6 (\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_TX_CSUM_L4V6 (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_RX_CSUM (\
VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
@@ -644,11 +644,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_SCTP_CSUM (\
#define IDPF_CAP_TX_SCTP_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
#define IDPF_CAP_TUNNEL_TX_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\

View File

@@ -703,8 +703,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
netdev_features_t other_offloads = 0;
netdev_features_t csum_offloads = 0;
netdev_features_t tso_offloads = 0;
netdev_features_t dflt_features;
netdev_features_t offloads = 0;
struct idpf_netdev_priv *np;
struct net_device *netdev;
u16 idx = vport->idx;
@@ -766,53 +768,32 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
dflt_features |= NETIF_F_IP_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
dflt_features |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
csum_offloads |= NETIF_F_IP_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
csum_offloads |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
dflt_features |= NETIF_F_RXCSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
dflt_features |= NETIF_F_SCTP_CRC;
csum_offloads |= NETIF_F_RXCSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
csum_offloads |= NETIF_F_SCTP_CRC;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
dflt_features |= NETIF_F_TSO;
tso_offloads |= NETIF_F_TSO;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
dflt_features |= NETIF_F_TSO6;
tso_offloads |= NETIF_F_TSO6;
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_IPV4_UDP |
VIRTCHNL2_CAP_SEG_IPV6_UDP))
dflt_features |= NETIF_F_GSO_UDP_L4;
tso_offloads |= NETIF_F_GSO_UDP_L4;
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
offloads |= NETIF_F_GRO_HW;
/* advertise to stack only if offloads for encapsulated packets is
* supported
*/
if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
offloads |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
0;
if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
IDPF_CAP_TUNNEL_TX_CSUM))
netdev->gso_partial_features |=
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
offloads |= NETIF_F_TSO_MANGLEID;
}
other_offloads |= NETIF_F_GRO_HW;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
offloads |= NETIF_F_LOOPBACK;
other_offloads |= NETIF_F_LOOPBACK;
netdev->features |= dflt_features;
netdev->hw_features |= dflt_features | offloads;
netdev->hw_enc_features |= dflt_features | offloads;
netdev->features |= dflt_features | csum_offloads | tso_offloads;
netdev->hw_features |= netdev->features | other_offloads;
netdev->vlan_features |= netdev->features | other_offloads;
netdev->hw_enc_features |= dflt_features | other_offloads;
idpf_set_ethtool_ops(netdev);
netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
@@ -1132,11 +1113,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
if (!vport->q_vector_idxs) {
kfree(vport);
if (!vport->q_vector_idxs)
goto free_vport;
return NULL;
}
idpf_vport_init(vport, max_q);
/* This alloc is done separate from the LUT because it's not strictly
@@ -1146,11 +1125,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
if (!rss_data->rss_key) {
kfree(vport);
if (!rss_data->rss_key)
goto free_vector_idxs;
return NULL;
}
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
@@ -1163,6 +1140,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
adapter->next_vport = idpf_get_free_slot(adapter);
return vport;
free_vector_idxs:
kfree(vport->q_vector_idxs);
free_vport:
kfree(vport);
return NULL;
}
/**

View File

@@ -89,6 +89,7 @@ static void idpf_shutdown(struct pci_dev *pdev)
{
struct idpf_adapter *adapter = pci_get_drvdata(pdev);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->vc_event_task);
idpf_vc_core_deinit(adapter);
idpf_deinit_dflt_mbx(adapter);

View File

@@ -1290,6 +1290,8 @@ void igc_ptp_reset(struct igc_adapter *adapter)
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
mutex_lock(&adapter->ptm_lock);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@@ -1308,7 +1310,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
if (!igc_is_crosststamp_supported(adapter))
break;
mutex_lock(&adapter->ptm_lock);
wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
@@ -1332,7 +1333,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
igc_ptm_reset(hw);
mutex_unlock(&adapter->ptm_lock);
break;
default:
/* No work to do. */
@@ -1349,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
mutex_unlock(&adapter->ptm_lock);
wrfl();
}

View File

@@ -1223,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
octep_stop(oct->netdev);
dev_close(oct->netdev);
rtnl_unlock();
}

View File

@@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
schedule_work(&oct->tx_timeout_task);
if (!schedule_work(&oct->tx_timeout_task))
netdev_put(netdev, NULL);
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)

View File

@@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = {
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
"top_usxgmii0_sel",
"top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
"top_xfi_phy0_xtal_sel",
"top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
@@ -2252,14 +2248,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
if (unlikely(dma_addr == DMA_MAPPING_ERROR))
addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
rxd->rxd2);
else
addr64 = RX_DMA_PREP_ADDR64(dma_addr);
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
ring->calc_idx = idx;
done++;

View File

@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
unsigned long flags;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
netif_wake_queue(ndev);
if (napi_complete(napi)) {
spin_lock(&priv->lock);
spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, false, true);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
@@ -1341,16 +1342,16 @@ static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
unsigned long flags;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
spin_lock(&priv->lock);
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, true, false);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
}
return work_done;

View File

@@ -176,6 +176,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
priv = ptpsq->txqsq.priv;
rtnl_lock();
mutex_lock(&priv->state_lock);
chs = &priv->channels;
netdev = priv->netdev;
@@ -183,22 +184,19 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
rtnl_lock();
mlx5e_activate_priv_channels(priv);
rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
netif_carrier_on(netdev);
mutex_unlock(&priv->state_lock);
rtnl_unlock();
return err;
}

View File

@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
if (err)
return err;
/* We can't mix custom tunnel headers with symbolic ones and we
* don't have a symbolic field name for GBP, so we use custom
* tunnel headers in this case. We need hardware support to
* match on custom tunnel headers, but we already know it's
* supported because the previous call successfully checked for
* that.
*/
misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_5);
misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters_5);
/* Shift by 8 to account for the reserved bits in the vxlan
* header after the VNI.
*/
MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
be32_to_cpu(enc_keyid.mask->keyid) << 8);
MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
be32_to_cpu(enc_keyid.key->keyid) << 8);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
return 0;
}
/* match on VNI is required */
@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,

View File

@@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
!list_is_first(&attr->list, &flow->attrs))
return 0;
if (flow_flag_test(flow, SLOW))
return 0;
esw_attr = attr->esw_attr;
if (!esw_attr->split_count ||
esw_attr->split_count == esw_attr->out_count - 1)
@@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
/* external dest with encap is considered as internal by firmware */
if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
ext_dest = true;
else
int_dest = true;

View File

@@ -3533,7 +3533,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
int err;
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
err = mlx5_rdma_enable_roce(esw->dev);
if (err)
goto err_roce;
err = mlx5_esw_host_number_init(esw);
if (err)
@@ -3594,6 +3596,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
err_roce:
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}

View File

@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
u8 mac[ETH_ALEN] = {};
union ib_gid gid;
u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
mlx5_nic_vport_disable_roce(dev);
}
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
return;
return 0;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
return;
return err;
}
err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
goto del_roce_addr;
}
return;
return err;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
return err;
}
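With mlx5_rdma_enable_roce() now returning an error code, each failure path unwinds only what was already set up before propagating the error to esw_offloads_enable(). A compact sketch of that goto-based unwinding shape follows, with invented acquire/release helpers rather than mlx5 calls.

#include <stdio.h>

static int acquire_a(void) { puts("A acquired"); return 0; }
static int acquire_b(void) { puts("B acquired"); return 0; }
static int acquire_c(void) { puts("C failed");   return -1; }
static void release_a(void) { puts("A released"); }
static void release_b(void) { puts("B released"); }

/* Acquire resources in order; on failure, release exactly what was
 * already acquired and return the error to the caller. */
static int setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;
	err = acquire_b();
	if (err)
		goto err_a;
	err = acquire_c();
	if (err)
		goto err_b;
	return 0;

err_b:
	release_b();
err_a:
	release_a();
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}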

View File

@@ -8,12 +8,12 @@
#ifdef CONFIG_MLX5_ESWITCH
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

View File

@@ -1815,6 +1815,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
if (nr_frags <= 0) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
tx->frame_last = tx->frame_first;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -1884,6 +1885,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
tx->frame_first = 0;
tx->frame_data0 = 0;
tx->frame_tail = 0;
tx->frame_last = 0;
return -ENOMEM;
}
@@ -1924,16 +1926,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
tx->frame_last = tx->frame_tail;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
buffer_info = &tx->buffer_info[tx->frame_last];
buffer_info->skb = skb;
if (time_stamp)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;

Some files were not shown because too many files have changed in this diff.