mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 14:51:51 -04:00
ksmbd_conn_free() is one of four sites that can observe the last
refcount drop of a struct ksmbd_conn. The other three —
  fs/smb/server/connection.c: ksmbd_conn_r_count_dec()
  fs/smb/server/oplock.c:     __free_opinfo()
  fs/smb/server/vfs_cache.c:  session_fd_check()
— end the conn with a bare kfree(), skipping
ida_destroy(&conn->async_ida) and
conn->transport->ops->free_transport(conn->transport). Whenever one
of them is the last putter, the embedded async_ida and the entire
transport struct leak -- for TCP, that is also the struct socket and
the kvec iov.
__free_opinfo() being a final putter is not theoretical. opinfo_put()
queues the callback via call_rcu(&opinfo->rcu, free_opinfo_rcu), so
ksmbd_server_terminate_conn() can deposit N opinfo releases in RCU and
have ksmbd_conn_free() run in the handler thread before any of them
fire. ksmbd_conn_free() then observes refcnt > 0 and short-circuits;
the last RCU-delivered __free_opinfo() falls onto its bare kfree(conn)
branch and the transport is lost.
A/B validation in a QEMU/virtme guest, mounting //127.0.0.1/testshare:
each iteration holds 8 files open via sleep processes, force-closes
TCP with "ss -K sport = :445", kills the holders, lazy-umounts;
repeated 10 times, then ksmbd shutdown and kmemleak scan.
state conn_alloc conn_free tcp_free opi_rcu kmemleak
---------- ---------- --------- -------- ------- --------
pre-patch 20 20 10 160 7
with patch 20 20 20 160 0
Pre-patch conn_free=20 with tcp_free=10 directly demonstrates the
bare-kfree paths skipping transport cleanup; kmemleak backtraces point
into struct tcp_transport / iov. With this patch tcp_free matches
conn_free at 20/20 and kmemleak is clean.
Move the per-struct final release into __ksmbd_conn_release_work() and
route the three bare-kfree final-put sites through a new
ksmbd_conn_put(). Those sites now pair ida_destroy() and
free_transport() with kfree(conn) regardless of which holder happens
to release the last reference. stop_sessions() only triggers the
transport shutdown and does not itself drop the last conn reference,
so it is unaffected.
The centralized release reaches sock_release() -> tcp_close() ->
lock_sock_nested() (might_sleep) from every final putter, including
__free_opinfo() invoked from an RCU softirq callback, which trips
CONFIG_DEBUG_ATOMIC_SLEEP. Defer the release to a dedicated
ksmbd_conn_wq workqueue so ksmbd_conn_put() is safe from any
non-sleeping context.
Make ksmbd_file own a strong connection reference while fp->conn is
non-NULL so durable-preserve and final-close paths cannot dereference
a stale connection. ksmbd_open_fd() and ksmbd_reopen_durable_fd()
take the reference via ksmbd_conn_get() (the latter also reorders the
fp->conn / fp->tcon assignments before __open_id() so the published fp
is never observed with fp->conn == NULL); session_fd_check() and
__ksmbd_close_fd() drop it via ksmbd_conn_put(). With that invariant,
session_fd_check() can take a local conn pointer once and use it
across the m_op_list and lock_list iterations even though op->conn
puts may otherwise drop the last reference.
At module exit the workqueue is flushed and destroyed after
rcu_barrier(), so any release queued by a trailing RCU callback is
drained before the inode hash and module text go away.
Fixes: ee426bfb9d ("ksmbd: add refcnt to ksmbd_conn struct")
Signed-off-by: DaeMyung Kang <charsyam@gmail.com>
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
267 lines
7.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
|
|
*/
|
|
|
|
#ifndef __KSMBD_CONNECTION_H__
|
|
#define __KSMBD_CONNECTION_H__
|
|
|
|
#include <linux/list.h>
|
|
#include <linux/inet.h>
|
|
#include <linux/ip.h>
|
|
#include <net/sock.h>
|
|
#include <net/tcp.h>
|
|
#include <net/inet_connection_sock.h>
|
|
#include <net/request_sock.h>
|
|
#include <linux/kthread.h>
|
|
#include <linux/nls.h>
|
|
#include <linux/unicode.h>
|
|
#include <linux/workqueue.h>
|
|
|
|
#include "smb_common.h"
|
|
#include "ksmbd_work.h"
|
|
|
|
struct smbdirect_buffer_descriptor_v1;
|
|
|
|
#define KSMBD_SOCKET_BACKLOG 16
|
|
|
|
/*
 * Connection/session state machine values kept in conn->status.
 * Read and written through the READ_ONCE()/WRITE_ONCE() accessors
 * declared later in this header.
 */
enum {
	KSMBD_SESS_NEW = 0,
	KSMBD_SESS_GOOD,
	KSMBD_SESS_EXITING,
	KSMBD_SESS_NEED_RECONNECT,
	KSMBD_SESS_NEED_NEGOTIATE,
	KSMBD_SESS_NEED_SETUP,
	KSMBD_SESS_RELEASING
};
/* Per-connection counters (see struct ksmbd_conn::stats). */
struct ksmbd_conn_stats {
	atomic_t	open_files_count;	/* currently open files */
	atomic64_t	request_served;		/* total requests handled */
};
struct ksmbd_transport;
|
|
|
|
/*
 * One SMB connection from a single client transport (TCP or RDMA).
 * Lifetime is governed by @refcnt; the final put defers the actual
 * release to a workqueue via @release_work so it is safe from
 * non-sleeping contexts such as RCU callbacks.
 */
struct ksmbd_conn {
	struct smb_version_values	*vals;
	struct smb_version_ops		*ops;
	struct smb_version_cmds		*cmds;
	unsigned int			max_cmds;
	struct mutex			srv_mutex;
	int				status;
	unsigned int			cli_cap;
	bool				stop_called;
	/* Peer address, IPv4 or (when enabled) IPv6 view of the same slot */
	union {
		__be32			inet_addr;
#if IS_ENABLED(CONFIG_IPV6)
		u8			inet6_addr[16];
#endif
	};
	unsigned int			inet_hash;
	char				*request_buf;
	struct ksmbd_transport		*transport;
	struct nls_table		*local_nls;
	struct unicode_map		*um;
	struct hlist_node		hlist;
	struct rw_semaphore		session_lock;
	/* smb session 1 per user */
	struct xarray			sessions;
	unsigned long			last_active;
	/* How many requests are running currently */
	atomic_t			req_running;
	/* References which are made for this Server object */
	atomic_t			r_count;
	unsigned int			total_credits;
	unsigned int			outstanding_credits;
	spinlock_t			credits_lock;
	wait_queue_head_t		req_running_q;
	wait_queue_head_t		r_count_q;
	/* Lock to protect requests list */
	spinlock_t			request_lock;
	struct list_head		requests;
	struct list_head		async_requests;
	int				connection_type;
	struct ksmbd_conn_stats		stats;
	char				ClientGUID[SMB2_CLIENT_GUID_SIZE];
	struct ntlmssp_auth		ntlmssp;

	/* presumably protects lock_list, mirroring request_lock/requests */
	spinlock_t			llist_lock;
	struct list_head		lock_list;

	struct preauth_integrity_info	*preauth_info;

	bool				need_neg;
	unsigned int			auth_mechs;
	unsigned int			preferred_auth_mech;
	bool				sign;
	bool				use_spnego:1;
	__u16				cli_sec_mode;
	__u16				srv_sec_mode;
	/* dialect index that server chose */
	__u16				dialect;

	char				*mechToken;
	unsigned int			mechTokenLen;

	struct ksmbd_conn_ops	*conn_ops;

	/* Preauth Session Table */
	struct list_head		preauth_sess_table;

	struct sockaddr_storage		peer_addr;

	/* Identifier for async message */
	struct ida			async_ida;

	__le16				cipher_type;
	__le16				compress_algorithm;
	bool				posix_ext_supported;
	bool				signing_negotiated;
	__le16				signing_algorithm;
	bool				binding;
	/* Connection lifetime refcount; see ksmbd_conn_get()/ksmbd_conn_put() */
	atomic_t			refcnt;
	bool				is_aapl;
	/*
	 * Deferred final release; queued on ksmbd_conn_wq so the last
	 * put may happen in atomic context (e.g. an RCU callback).
	 */
	struct work_struct		release_work;
};
/*
 * Server-level callbacks, installed through
 * ksmbd_conn_init_server_callbacks().
 */
struct ksmbd_conn_ops {
	int	(*process_fn)(struct ksmbd_conn *conn);
	int	(*terminate_fn)(struct ksmbd_conn *conn);
};
/*
 * Operations implemented by each concrete transport (TCP, SMB Direct).
 * free_transport() is the final destructor for the transport object;
 * it must be invoked exactly once when the owning connection is released.
 */
struct ksmbd_transport_ops {
	void (*disconnect)(struct ksmbd_transport *t);
	void (*shutdown)(struct ksmbd_transport *t);
	int (*read)(struct ksmbd_transport *t, char *buf,
		    unsigned int size, int max_retries);
	int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
		      int size, bool need_invalidate_rkey,
		      unsigned int remote_key);
	/* RDMA transfers described by smbdirect v1 buffer descriptors */
	int (*rdma_read)(struct ksmbd_transport *t,
			 void *buf, unsigned int len,
			 struct smbdirect_buffer_descriptor_v1 *desc,
			 unsigned int desc_len);
	int (*rdma_write)(struct ksmbd_transport *t,
			  void *buf, unsigned int len,
			  struct smbdirect_buffer_descriptor_v1 *desc,
			  unsigned int desc_len);
	void (*free_transport)(struct ksmbd_transport *kt);
};
/*
 * Base transport handle; presumably embedded in each concrete
 * transport implementation (TODO confirm against transport_tcp.c /
 * transport_rdma.c).
 */
struct ksmbd_transport {
	struct ksmbd_conn		*conn;
	const struct ksmbd_transport_ops *ops;
};
#define KSMBD_TCP_RECV_TIMEOUT	(7 * HZ)
#define KSMBD_TCP_SEND_TIMEOUT	(5 * HZ)
#define KSMBD_TCP_PEER_SOCKADDR(c)	((struct sockaddr *)&((c)->peer_addr))

/* Global table of live connections and the rwsem guarding it. */
#define CONN_HASH_BITS 12
extern DECLARE_HASHTABLE(conn_list, CONN_HASH_BITS);
extern struct rw_semaphore conn_list_lock;

/* Liveness / quiescence helpers */
bool ksmbd_conn_alive(struct ksmbd_conn *conn);
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id);

/* Allocation and refcounted lifetime (get/put pair owns a reference) */
struct ksmbd_conn *ksmbd_conn_alloc(void);
void ksmbd_conn_free(struct ksmbd_conn *conn);
struct ksmbd_conn *ksmbd_conn_get(struct ksmbd_conn *conn);
void ksmbd_conn_put(struct ksmbd_conn *conn);

/* Workqueue that carries the deferred final connection release */
int ksmbd_conn_wq_init(void);
void ksmbd_conn_wq_destroy(void);

bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
int ksmbd_conn_write(struct ksmbd_work *work);
int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smbdirect_buffer_descriptor_v1 *desc,
			 unsigned int desc_len);
int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smbdirect_buffer_descriptor_v1 *desc,
			  unsigned int desc_len);
void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
int ksmbd_conn_handler_loop(void *p);
int ksmbd_conn_transport_init(void);
void ksmbd_conn_transport_destroy(void);
void ksmbd_conn_lock(struct ksmbd_conn *conn);
void ksmbd_conn_unlock(struct ksmbd_conn *conn);
void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn);
void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn);
/*
|
|
* WARNING
|
|
*
|
|
* This is a hack. We will move status to a proper place once we land
|
|
* a multi-sessions support.
|
|
*/
|
|
static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
|
|
{
|
|
return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
|
|
}
|
|
|
|
static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
|
|
{
|
|
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
|
|
}
|
|
|
|
static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn)
|
|
{
|
|
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP;
|
|
}
|
|
|
|
static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
|
|
{
|
|
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
|
|
}
|
|
|
|
static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
|
|
{
|
|
return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
|
|
}
|
|
|
|
static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
|
|
{
|
|
return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
|
|
}
|
|
|
|
/* Mark the connection as freshly accepted. */
static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
{
	WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
}
/* Mark the connection as fully established and usable. */
static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
{
	WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
}
/* Request that the connection (re)run SMB negotiation. */
static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
{
	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
}
/* Request that the connection run session setup next. */
static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn)
{
	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP);
}
/* Flag the connection as needing a client reconnect. */
static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
{
	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
}
/* Move the connection into teardown. */
static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
{
	WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
}
/* Mark the connection as releasing its resources. */
static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
{
	WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
}
void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
|
|
#endif /* __CONNECTION_H__ */
|