Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.17-rc4).

No conflicts.

Adjacent changes:

drivers/net/ethernet/intel/idpf/idpf_txrx.c
  02614eee26 ("idpf: do not linearize big TSO packets")
  6c4e684802 ("idpf: remove obsolete stashing code")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -97,6 +97,7 @@ config KEXEC_JUMP
 config KEXEC_HANDOVER
 	bool "kexec handover"
 	depends on ARCH_SUPPORTS_KEXEC_HANDOVER && ARCH_SUPPORTS_KEXEC_FILE
+	depends on !DEFERRED_STRUCT_PAGE_INIT
 	select MEMBLOCK_KHO_SCRATCH
 	select KEXEC_FILE
 	select DEBUG_FS
@@ -280,7 +280,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
 {
 	if (!cpusets_insane_config() &&
 	    movable_only_nodes(nodes)) {
-		static_branch_enable(&cpusets_insane_config_key);
+		static_branch_enable_cpuslocked(&cpusets_insane_config_key);
 		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
 			"Cpuset allocations might fail even with a lot of memory available.\n",
 			nodemask_pr_args(nodes));
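The fix swaps static_branch_enable() for its _cpuslocked variant because the caller already holds the CPU hotplug read lock, and the plain variant would try to take it again. The kernel convention is to pair func() with a func_locked()/func_cpuslocked() flavor for callers that already own the lock; a minimal userspace sketch of that convention with POSIX rwlocks (all names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int key_enabled;

    /* Variant for callers that already hold hotplug_lock for reading. */
    static void key_enable_locked(void)
    {
        key_enabled = 1;
    }

    /* Plain variant: takes the lock itself, then delegates. */
    static void key_enable(void)
    {
        pthread_rwlock_rdlock(&hotplug_lock);
        key_enable_locked();
        pthread_rwlock_unlock(&hotplug_lock);
    }

    int main(void)
    {
        /* Inside a read-side section, the _locked variant must be used:
         * re-acquiring the lock here could deadlock if a writer is
         * queued between the two read acquisitions. */
        pthread_rwlock_rdlock(&hotplug_lock);
        key_enable_locked();
        pthread_rwlock_unlock(&hotplug_lock);

        key_enable(); /* outside the section, the plain variant locks itself */
        printf("enabled=%d\n", key_enabled);
        return 0;
    }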
@@ -1843,7 +1843,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 		if (is_partition_valid(cs))
 			adding = cpumask_and(tmp->addmask,
 					xcpus, parent->effective_xcpus);
-	} else if (is_partition_invalid(cs) &&
+	} else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
 		   cpumask_subset(xcpus, parent->effective_xcpus)) {
 		struct cgroup_subsys_state *css;
 		struct cpuset *child;
@@ -3358,14 +3358,12 @@ static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
 	else
 		return -EINVAL;
 
-	css_get(&cs->css);
 	cpus_read_lock();
 	mutex_lock(&cpuset_mutex);
 	if (is_cpuset_online(cs))
 		retval = update_prstate(cs, val);
 	mutex_unlock(&cpuset_mutex);
 	cpus_read_unlock();
-	css_put(&cs->css);
 	return retval ?: nbytes;
 }
 
@@ -3870,9 +3868,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
 		partcmd = partcmd_invalidate;
 	/*
 	 * On the other hand, an invalid partition root may be transitioned
-	 * back to a regular one.
+	 * back to a regular one with a non-empty effective xcpus.
 	 */
-	else if (is_partition_valid(parent) && is_partition_invalid(cs))
+	else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
+		 !cpumask_empty(cs->effective_xcpus))
 		partcmd = partcmd_update;
 
 	if (partcmd >= 0) {
@@ -479,6 +479,9 @@ void css_rstat_exit(struct cgroup_subsys_state *css)
 	if (!css_uses_rstat(css))
 		return;
 
+	if (!css->rstat_cpu)
+		return;
+
 	css_rstat_flush(css);
 
 	/* sanity check */
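The added NULL check makes css_rstat_exit() safe to call even when the rstat per-CPU data was never allocated, e.g. when initialization failed earlier. Teardown helpers that tolerate a partially initialized object are a common defensive pattern; a small runnable sketch, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct counters {
        long *per_cpu;  /* NULL until init succeeds */
        size_t ncpu;
    };

    static int counters_init(struct counters *c, size_t ncpu)
    {
        c->per_cpu = calloc(ncpu, sizeof(*c->per_cpu));
        if (!c->per_cpu)
            return -1;
        c->ncpu = ncpu;
        return 0;
    }

    static void counters_exit(struct counters *c)
    {
        /* Bail out if init never ran or failed: mirrors the
         * "if (!css->rstat_cpu) return;" guard in the hunk above. */
        if (!c->per_cpu)
            return;
        free(c->per_cpu);
        c->per_cpu = NULL;
    }

    int main(void)
    {
        struct counters c = { 0 };

        counters_exit(&c);          /* safe even though init never ran */
        if (counters_init(&c, 4) == 0)
            counters_exit(&c);
        puts("ok");
        return 0;
    }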
@@ -483,8 +483,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 		pr_err("Reserved memory: unable to setup CMA region\n");
 		return err;
 	}
-	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(rmem->base, rmem->size);
 
 	if (default_cma)
 		dma_contiguous_default_area = cma;
@@ -102,8 +102,8 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 
 #ifdef CONFIG_DMA_DIRECT_REMAP
 	addr = dma_common_contiguous_remap(page, pool_size,
-					   pgprot_dmacoherent(PAGE_KERNEL),
-					   __builtin_return_address(0));
+					   pgprot_decrypted(pgprot_dmacoherent(PAGE_KERNEL)),
+					   __builtin_return_address(0));
 	if (!addr)
 		goto free_page;
 #else
@@ -2665,6 +2665,9 @@ static void perf_log_itrace_start(struct perf_event *event);
 
 static void perf_event_unthrottle(struct perf_event *event, bool start)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return;
+
 	event->hw.interrupts = 0;
 	if (start)
 		event->pmu->start(event, 0);
@@ -2674,6 +2677,9 @@ static void perf_event_unthrottle(struct perf_event *event, bool start)
 
 static void perf_event_throttle(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return;
+
 	event->hw.interrupts = MAX_INTERRUPTS;
 	event->pmu->stop(event, 0);
 	if (event == event->group_leader)
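Both helpers gain the same guard: only an ACTIVE event may have its PMU started or stopped, so stale throttle work on an event that has since been descheduled becomes a no-op. A self-contained sketch of the guard-clause shape (types and printouts invented):

    #include <stdio.h>

    enum ev_state { EV_INACTIVE, EV_ACTIVE };
    #define MAX_INTERRUPTS (~0U)

    struct event {
        enum ev_state state;
        unsigned int interrupts;
    };

    static void event_throttle(struct event *e)
    {
        if (e->state != EV_ACTIVE)  /* guard: never stop an inactive event */
            return;
        e->interrupts = MAX_INTERRUPTS;
        printf("pmu stop\n");
    }

    static void event_unthrottle(struct event *e, int start)
    {
        if (e->state != EV_ACTIVE)  /* guard mirrors the one added above */
            return;
        e->interrupts = 0;
        if (start)
            printf("pmu start\n");
    }

    int main(void)
    {
        struct event e = { EV_INACTIVE, 0 };

        event_throttle(&e);         /* no-op: inactive */
        e.state = EV_ACTIVE;
        event_throttle(&e);
        event_unthrottle(&e, 1);
        return 0;
    }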
@@ -144,14 +144,34 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
 				unsigned int order)
 {
 	struct kho_mem_phys_bits *bits;
-	struct kho_mem_phys *physxa;
+	struct kho_mem_phys *physxa, *new_physxa;
 	const unsigned long pfn_high = pfn >> order;
 
 	might_sleep();
 
-	physxa = xa_load_or_alloc(&track->orders, order, sizeof(*physxa));
-	if (IS_ERR(physxa))
-		return PTR_ERR(physxa);
+	physxa = xa_load(&track->orders, order);
+	if (!physxa) {
+		int err;
+
+		new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
+		if (!new_physxa)
+			return -ENOMEM;
+
+		xa_init(&new_physxa->phys_bits);
+		physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
+				    GFP_KERNEL);
+
+		err = xa_err(physxa);
+		if (err || physxa) {
+			xa_destroy(&new_physxa->phys_bits);
+			kfree(new_physxa);
+
+			if (err)
+				return err;
+		} else {
+			physxa = new_physxa;
+		}
+	}
 
 	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS,
 				sizeof(*bits));
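The rewrite open-codes the lookup: allocate and fully initialize a candidate (including xa_init() of the embedded xarray) before publishing it with xa_cmpxchg(), and if another CPU won the race or an error came back, destroy the local copy. The same init-then-compare-exchange-then-free-on-loss pattern can be sketched with plain C11 atomics, a single pointer slot standing in for the xarray (names invented):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { int order; };

    static _Atomic(struct node *) slot;   /* stands in for the xarray entry */

    /* Return the node for the slot, creating it on first use.  Mirrors the
     * xa_load()/xa_cmpxchg() dance above: fully initialize before publishing,
     * and free our copy if another thread won the race. */
    static struct node *get_or_create(int order)
    {
        struct node *n = atomic_load(&slot);
        struct node *expected = NULL;
        struct node *fresh;

        if (n)
            return n;

        fresh = calloc(1, sizeof(*fresh));
        if (!fresh)
            return NULL;
        fresh->order = order;   /* initialize *before* it becomes visible */

        if (atomic_compare_exchange_strong(&slot, &expected, fresh))
            return fresh;       /* we published it */

        free(fresh);            /* lost the race: use the winner's node */
        return expected;
    }

    int main(void)
    {
        struct node *n = get_or_create(3);

        printf("order=%d\n", n ? n->order : -1);
        free(atomic_load(&slot));
        return 0;
    }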
@@ -544,6 +564,7 @@ static void __init kho_reserve_scratch(void)
 err_free_scratch_desc:
 	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
 err_disable_kho:
+	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
 	kho_enable = false;
 }
 
@@ -513,13 +513,14 @@ EXPORT_SYMBOL(param_array_ops);
 int param_set_copystring(const char *val, const struct kernel_param *kp)
 {
 	const struct kparam_string *kps = kp->str;
+	const size_t len = strnlen(val, kps->maxlen);
 
-	if (strnlen(val, kps->maxlen) == kps->maxlen) {
+	if (len == kps->maxlen) {
 		pr_err("%s: string doesn't fit in %u chars.\n",
 		       kp->name, kps->maxlen-1);
 		return -ENOSPC;
 	}
-	strcpy(kps->string, val);
+	memcpy(kps->string, val, len + 1);
 	return 0;
 }
 EXPORT_SYMBOL(param_set_copystring);
@@ -841,7 +842,7 @@ static void __init param_sysfs_builtin(void)
 		dot = strchr(kp->name, '.');
 		if (!dot) {
 			/* This happens for core_param() */
-			strcpy(modname, "kernel");
+			strscpy(modname, "kernel");
 			name_len = 0;
 		} else {
 			name_len = dot - kp->name + 1;
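Both hunks replace strcpy() with bounded copies: param_set_copystring() measures once with strnlen() and reuses the result for the overflow check and a memcpy() of exactly len + 1 bytes, while param_sysfs_builtin() switches to strscpy(), which cannot overrun the destination. A userspace approximation of the first change (helper name invented):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Copy val into a fixed-size buffer, rejecting strings that don't fit.
     * Mirrors the reworked param_set_copystring(): one strnlen(), then a
     * memcpy() of len + 1 so the scan isn't repeated by strcpy(). */
    static int set_copystring(char *dst, size_t maxlen, const char *val)
    {
        const size_t len = strnlen(val, maxlen);

        if (len == maxlen)      /* no room for the terminating NUL */
            return -ENOSPC;
        memcpy(dst, val, len + 1);
        return 0;
    }

    int main(void)
    {
        char buf[8];

        printf("%d\n", set_copystring(buf, sizeof(buf), "short"));   /* 0 */
        printf("%d\n", set_copystring(buf, sizeof(buf), "definitely too long"));
        return 0;
    }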
@@ -5749,6 +5749,9 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 			__setscheduler_class(p->policy, p->prio);
 		struct sched_enq_and_set_ctx ctx;
 
+		if (!tryget_task_struct(p))
+			continue;
+
 		if (old_class != new_class && p->se.sched_delayed)
 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
 
@@ -5761,6 +5764,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		sched_enq_and_set_task(&ctx);
 
 		check_class_changed(task_rq(p), p, old_class, p->prio);
+		put_task_struct(p);
 	}
 	scx_task_iter_stop(&sti);
 	percpu_up_write(&scx_fork_rwsem);
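The new tryget/put pair pins each task for the duration of the class update, and a failing tryget doubles as a cheap way to skip tasks that are already exiting. A single-threaded sketch of the idea (a real refcount would be atomic; all names invented):

    #include <stdio.h>

    struct task {
        int refs;   /* 0 means the task is already gone */
        int prio;
    };

    /* Take a reference only if the object is still live. */
    static int tryget(struct task *t)
    {
        if (t->refs == 0)
            return 0;
        t->refs++;
        return 1;
    }

    static void put(struct task *t)
    {
        t->refs--;
    }

    static void update_prio(struct task *t, int prio)
    {
        if (!tryget(t))     /* skip tasks already on their way out */
            return;
        t->prio = prio;     /* safe: our reference pins the task */
        put(t);
    }

    int main(void)
    {
        struct task live = { .refs = 1 }, dead = { .refs = 0 };

        update_prio(&live, 10);
        update_prio(&dead, 10); /* no-op */
        printf("live=%d dead=%d\n", live.prio, dead.prio);
        return 0;
    }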
@@ -1397,6 +1397,7 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 		ftrace_graph_active--;
 		gops->saved_func = NULL;
 		fgraph_lru_release_index(i);
+		unregister_pm_notifier(&ftrace_suspend_notifier);
 	}
 	return ret;
 }
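The added unregister_pm_notifier() completes the error path: register_ftrace_graph() registers the suspend notifier earlier in the function, so a later failure must undo it along with the rest of the partially completed setup, in reverse order. A generic sketch of that unwind shape (steps invented):

    #include <stdio.h>

    static int  step_a(void) { puts("a up");   return 0; }
    static void undo_a(void) { puts("a down"); }
    static int  step_b(void) { puts("b up");   return 0; }
    static void undo_b(void) { puts("b down"); }
    static int  step_c(void) { puts("c fails"); return -1; }

    /* Classic kernel-style unwind: each failure label undoes, in reverse
     * order, exactly the steps that succeeded before it. */
    static int do_register(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            goto out;
        ret = step_b();
        if (ret)
            goto undo_a;
        ret = step_c();
        if (ret)
            goto undo_b;
        return 0;

    undo_b:
        undo_b();
    undo_a:
        undo_a();
    out:
        return ret;
    }

    int main(void)
    {
        return do_register() ? 1 : 0;
    }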
@@ -4661,13 +4661,17 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		} else {
 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
 		}
+	} else {
+		if (hash)
+			iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash);
+		else
+			iter->hash = EMPTY_HASH;
+	}
 
-		if (!iter->hash) {
-			trace_parser_put(&iter->parser);
-			goto out_unlock;
-		}
-	} else
-		iter->hash = hash;
+	if (!iter->hash) {
+		trace_parser_put(&iter->parser);
+		goto out_unlock;
+	}
 
 	ret = 0;
@@ -6543,9 +6547,6 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
 						iter->hash, filter_hash);
 		mutex_unlock(&ftrace_lock);
-	} else {
-		/* For read only, the hash is the ops hash */
-		iter->hash = NULL;
 	}
 
 	mutex_unlock(&iter->ops->func_hash->regex_lock);
@@ -7666,7 +7666,7 @@ static __init int test_ringbuffer(void)
 	rb_test_started = true;
 
 	set_current_state(TASK_INTERRUPTIBLE);
-	/* Just run for 10 seconds */;
+	/* Just run for 10 seconds */
 	schedule_timeout(10 * HZ);
 
 	kthread_stop(rb_hammer);
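The fix merely drops a stray semicolon after the comment, harmless in this spot. The same slip is worth catching because after an if or while it silently becomes an empty statement body, as this deliberately buggy snippet shows:

    #include <stdio.h>

    int main(void)
    {
        int n = 0;

        if (n > 0); /* stray ';' ends the if with an empty statement */
            printf("printed even though n <= 0\n");
        return 0;
    }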
@@ -1816,7 +1816,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 
 	ret = get_user(ch, ubuf++);
 	if (ret)
-		return ret;
+		goto fail;
 
 	read++;
 	cnt--;
@@ -1830,7 +1830,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 	while (cnt && isspace(ch)) {
 		ret = get_user(ch, ubuf++);
 		if (ret)
-			return ret;
+			goto fail;
 		read++;
 		cnt--;
 	}
@@ -1848,12 +1848,14 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 	while (cnt && !isspace(ch) && ch) {
 		if (parser->idx < parser->size - 1)
 			parser->buffer[parser->idx++] = ch;
-		else
-			return -EINVAL;
+		else {
+			ret = -EINVAL;
+			goto fail;
+		}
 
 		ret = get_user(ch, ubuf++);
 		if (ret)
-			return ret;
+			goto fail;
 		read++;
 		cnt--;
 	}
@@ -1868,11 +1870,15 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 		/* Make sure the parsed string always terminates with '\0'. */
 		parser->buffer[parser->idx] = 0;
 	} else {
-		return -EINVAL;
+		ret = -EINVAL;
+		goto fail;
 	}
 
 	*ppos += read;
 	return read;
+fail:
+	trace_parser_fail(parser);
+	return ret;
 }
 
 /* TODO add a seq_buf_to_buffer() */
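Taken together with the trace_parser changes further down, these hunks route every error exit of trace_get_user() through a single fail: label that latches a sticky failure flag on the parser, and readers consult the flag before trusting the buffer. A compact userspace sketch of the same shape (struct and limits invented):

    #include <ctype.h>
    #include <stdio.h>

    struct parser {
        char buf[16];
        unsigned idx;
        int fail;   /* sticky failure flag, as added to trace_parser */
    };

    /* Parse one whitespace-delimited word.  All error exits funnel through
     * the fail: label so the parser is marked bad exactly once. */
    static int get_word(struct parser *p, const char *src)
    {
        int ret = 0;

        p->idx = 0;
        while (*src && !isspace((unsigned char)*src)) {
            if (p->idx >= sizeof(p->buf) - 1) {
                ret = -1;   /* word too long */
                goto fail;
            }
            p->buf[p->idx++] = *src++;
        }
        p->buf[p->idx] = '\0';
        return (int)p->idx;
    fail:
        p->fail = 1;
        return ret;
    }

    /* Readers check the sticky flag, like the reworked trace_parser_loaded(). */
    static int parser_loaded(const struct parser *p)
    {
        return !p->fail && p->idx != 0;
    }

    int main(void)
    {
        struct parser p = { {0}, 0, 0 };

        get_word(&p, "hello world");
        printf("loaded=%d buf=%s\n", parser_loaded(&p), p.buf);
        get_word(&p, "a-word-that-is-way-too-long");
        printf("loaded=%d\n", parser_loaded(&p));
        return 0;
    }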
@@ -10632,10 +10638,10 @@ static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_m
 			ret = print_trace_line(&iter);
 			if (ret != TRACE_TYPE_NO_CONSUME)
 				trace_consume(&iter);
+
+			trace_printk_seq(&iter.seq);
 		}
 		touch_nmi_watchdog();
-
-		trace_printk_seq(&iter.seq);
 	}
 
 	if (!cnt)
@@ -1292,6 +1292,7 @@ bool ftrace_event_is_function(struct trace_event_call *call);
  */
 struct trace_parser {
 	bool		cont;
+	bool		fail;
 	char		*buffer;
 	unsigned	idx;
 	unsigned	size;
@@ -1299,7 +1300,7 @@ struct trace_parser {
 
 static inline bool trace_parser_loaded(struct trace_parser *parser)
 {
-	return (parser->idx != 0);
+	return !parser->fail && parser->idx != 0;
 }
 
 static inline bool trace_parser_cont(struct trace_parser *parser)
@@ -1313,6 +1314,11 @@ static inline void trace_parser_clear(struct trace_parser *parser)
 	parser->idx = 0;
 }
 
+static inline void trace_parser_fail(struct trace_parser *parser)
+{
+	parser->fail = true;
+}
+
 extern int trace_parser_get_init(struct trace_parser *parser, int size);
 extern void trace_parser_put(struct trace_parser *parser);
 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
@@ -27,14 +27,21 @@ struct fgraph_cpu_data {
 	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
+struct fgraph_ent_args {
+	struct ftrace_graph_ent_entry	ent;
+	/* Force the sizeof of args[] to have FTRACE_REGS_MAX_ARGS entries */
+	unsigned long			args[FTRACE_REGS_MAX_ARGS];
+};
+
 struct fgraph_data {
 	struct fgraph_cpu_data __percpu *cpu_data;
 
 	/* Place to preserve last processed entry. */
 	union {
-		struct ftrace_graph_ent_entry	ent;
+		struct fgraph_ent_args		ent;
 		/* TODO allow retaddr to have args */
 		struct fgraph_retaddr_ent_entry	rent;
-	} ent;
+	};
 	struct ftrace_graph_ret_entry	ret;
 	int				failed;
 	int				cpu;
@@ -627,10 +634,13 @@ get_return_for_leaf(struct trace_iterator *iter,
 			 * Save current and next entries for later reference
 			 * if the output fails.
 			 */
-			if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT))
-				data->ent.rent = *(struct fgraph_retaddr_ent_entry *)curr;
-			else
-				data->ent.ent = *curr;
+			if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT)) {
+				data->rent = *(struct fgraph_retaddr_ent_entry *)curr;
+			} else {
+				int size = min((int)sizeof(data->ent), (int)iter->ent_size);
+
+				memcpy(&data->ent, curr, size);
+			}
 			/*
 			 * If the next event is not a return type, then
 			 * we only care about what type it is. Otherwise we can
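The key change is the bounded copy: ring-buffer entries are variable-sized, so the saved copy now takes min(sizeof(data->ent), iter->ent_size) bytes rather than assuming every entry carries a full set of arguments. A small sketch of copying a variable-sized record into a fixed-capacity slot (types invented):

    #include <stdio.h>
    #include <string.h>

    #define MAX_ARGS 6

    struct entry_hdr { int type; };
    struct full_entry {
        struct entry_hdr hdr;
        unsigned long args[MAX_ARGS];
    };

    /* Copy whatever portion of the entry actually exists: some entries on
     * the wire carry fewer args than struct full_entry can hold. */
    static void save_entry(struct full_entry *dst, const void *src, size_t src_size)
    {
        size_t n = src_size < sizeof(*dst) ? src_size : sizeof(*dst);

        memset(dst, 0, sizeof(*dst));
        memcpy(dst, src, n);
    }

    int main(void)
    {
        struct { struct entry_hdr hdr; unsigned long args[2]; } small = {
            { 1 }, { 42, 43 }
        };
        struct full_entry dst;

        save_entry(&dst, &small, sizeof(small));
        printf("type=%d arg0=%lu\n", dst.hdr.type, dst.args[0]);
        return 0;
    }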