Merge branch 'macsec-use-rcu_work-to-fix-crypto-cleanup-in-softirq-context'

Jinliang Zheng says:

====================
macsec: use rcu_work to fix crypto cleanup in softirq context

From: Jinliang Zheng <alexjlzheng@tencent.com>

crypto_free_aead() can internally call vunmap() (e.g. via dma_free_attrs()
in hardware crypto drivers like hisi_sec2), which must not be invoked from
softirq context. Both free_rxsa() and free_txsa() are RCU callbacks that
run in softirq, causing a kernel crash on affected hardware.

This series fixes the issue by deferring the actual cleanup to a workqueue
using rcu_work, which combines the RCU grace period and workqueue dispatch
into a single primitive.

Two design decisions worth noting:

1. rcu_work instead of schedule_work() + synchronize_rcu()

   An alternative would be to call schedule_work() directly from
   macsec_rxsa_put()/macsec_txsa_put(), then call synchronize_rcu() at
   the start of the work handler to replace the grace period previously
   provided by call_rcu(). However, synchronize_rcu() blocks the worker
   thread for the duration of a full RCU grace period. Under high SA
   churn (e.g. tearing down an interface with many SAs), each SA would
   occupy a worker thread while waiting, and multiple concurrent calls
   cannot share the same grace period — leading to unnecessary latency
   and resource waste.

   rcu_work uses call_rcu_hurry() internally, which is fully asynchronous:
   the worker thread is only dispatched after the grace period has elapsed,
   and multiple concurrent queue_rcu_work() calls naturally batch under the
   same grace period via the RCU subsystem's existing coalescing mechanism.

2. Dedicated workqueue instead of system_wq

   Using a dedicated workqueue (macsec_wq) allows macsec_exit() to drain
   exactly the work items belonging to this module: rcu_barrier() first
   waits for any in-flight RCU callbacks that may still queue work, and
   destroy_workqueue() then flushes and releases the queue — so the order
   (barrier before destroy) matters. If system_wq were used,
   flush_scheduled_work() would drain all pending work items across the
   entire system, creating unnecessary coupling with unrelated subsystems
   and potentially causing unexpected delays. The dedicated workqueue
   provides a clean, contained teardown path.
====================

Link: https://patch.msgid.link/20260511153102.2640368-1-alexjlzheng@tencent.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2026-05-13 19:03:07 -07:00
2 changed files with 33 additions and 13 deletions

View File

@@ -26,6 +26,8 @@
#include <uapi/linux/if_macsec.h>
static struct workqueue_struct *macsec_wq;
/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
@@ -174,9 +176,10 @@ static void macsec_rxsc_put(struct macsec_rx_sc *sc)
call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}
static void free_rxsa(struct rcu_head *head)
static void free_rxsa_work(struct work_struct *work)
{
struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
struct macsec_rx_sa *sa =
container_of(to_rcu_work(work), struct macsec_rx_sa, destroy_work);
crypto_free_aead(sa->key.tfm);
free_percpu(sa->stats);
@@ -186,7 +189,7 @@ static void free_rxsa(struct rcu_head *head)
static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_rxsa);
queue_rcu_work(macsec_wq, &sa->destroy_work);
}
static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
@@ -202,9 +205,10 @@ static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
return sa;
}
static void free_txsa(struct rcu_head *head)
static void free_txsa_work(struct work_struct *work)
{
struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
struct macsec_tx_sa *sa =
container_of(to_rcu_work(work), struct macsec_tx_sa, destroy_work);
crypto_free_aead(sa->key.tfm);
free_percpu(sa->stats);
@@ -214,7 +218,7 @@ static void free_txsa(struct rcu_head *head)
static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_txsa);
queue_rcu_work(macsec_wq, &sa->destroy_work);
}
static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
@@ -1407,6 +1411,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
rx_sa->next_pn = 1;
refcount_set(&rx_sa->refcnt, 1);
spin_lock_init(&rx_sa->lock);
INIT_RCU_WORK(&rx_sa->destroy_work, free_rxsa_work);
return 0;
}
@@ -1506,6 +1511,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
tx_sa->active = false;
refcount_set(&tx_sa->refcnt, 1);
spin_lock_init(&tx_sa->lock);
INIT_RCU_WORK(&tx_sa->destroy_work, free_txsa_work);
return 0;
}
@@ -4505,25 +4511,35 @@ static int __init macsec_init(void)
{
int err;
macsec_wq = alloc_workqueue("macsec", WQ_UNBOUND, 0);
if (!macsec_wq)
return -ENOMEM;
pr_info("MACsec IEEE 802.1AE\n");
err = register_netdevice_notifier(&macsec_notifier);
if (err)
return err;
goto err_destroy_wq;
err = rtnl_link_register(&macsec_link_ops);
if (err)
goto notifier;
goto err_notifier;
err = genl_register_family(&macsec_fam);
if (err)
goto rtnl;
goto err_rtnl;
return 0;
rtnl:
err_rtnl:
rtnl_link_unregister(&macsec_link_ops);
notifier:
err_notifier:
unregister_netdevice_notifier(&macsec_notifier);
err_destroy_wq:
/* Precautionary, mirrors macsec_exit() to stay safe if work
* ever becomes queueable before this point in the future.
*/
rcu_barrier();
destroy_workqueue(macsec_wq);
return err;
}
@@ -4533,6 +4549,7 @@ static void __exit macsec_exit(void)
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
rcu_barrier();
destroy_workqueue(macsec_wq);
}
module_init(macsec_init);

View File

@@ -9,6 +9,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_link.h>
#include <uapi/linux/if_macsec.h>
@@ -123,6 +124,7 @@ struct macsec_dev_stats {
* @key: key structure
* @ssci: short secure channel identifier
* @stats: per-SA stats
* @destroy_work: deferred work to free the SA in process context after RCU grace period
*/
struct macsec_rx_sa {
struct macsec_key key;
@@ -136,7 +138,7 @@ struct macsec_rx_sa {
bool active;
struct macsec_rx_sa_stats __percpu *stats;
struct macsec_rx_sc *sc;
struct rcu_head rcu;
struct rcu_work destroy_work;
};
struct pcpu_rx_sc_stats {
@@ -174,6 +176,7 @@ struct macsec_rx_sc {
* @key: key structure
* @ssci: short secure channel identifier
* @stats: per-SA stats
* @destroy_work: deferred work to free the SA in process context after RCU grace period
*/
struct macsec_tx_sa {
struct macsec_key key;
@@ -186,7 +189,7 @@ struct macsec_tx_sa {
refcount_t refcnt;
bool active;
struct macsec_tx_sa_stats __percpu *stats;
struct rcu_head rcu;
struct rcu_work destroy_work;
};
/**