hfsplus: fix uninit-value by validating catalog record size

Syzbot reported a KMSAN uninit-value issue in hfsplus_strcasecmp(). The
root cause is that hfs_brec_read() doesn't validate that the on-disk
record size matches the expected size for the record type being read.

When mounting a corrupted filesystem, hfs_brec_read() may read less data
than expected. For example, when reading a catalog thread record, the
debug output showed:

  HFSPLUS_BREC_READ: rec_len=520, fd->entrylength=26
  HFSPLUS_BREC_READ: WARNING - entrylength (26) < rec_len (520) - PARTIAL READ!

hfs_brec_read() only validates that entrylength is not greater than the
buffer size, but doesn't check if it's less than expected. It successfully
reads 26 bytes into a 520-byte structure and returns success, leaving 494
bytes uninitialized.

This uninitialized data in tmp.thread.nodeName then gets copied by
hfsplus_cat_build_key_uni() and used by hfsplus_strcasecmp(), triggering
the KMSAN warning when the uninitialized bytes are used as array indices
in case_fold().

Fix by introducing an hfsplus_brec_read_cat() wrapper that:
1. Calls hfs_brec_read() to read the data
2. Validates the record size based on the type field:
   - Fixed size for folder and file records
   - Variable size for thread records (depends on string length)
3. Returns -EIO if the size doesn't match the expected value

For thread records, check against HFSPLUS_MIN_THREAD_SZ before reading
nodeName.length to avoid reading uninitialized data at call sites that
don't zero-initialize the entry structure.

Also initialize the tmp variable in hfsplus_find_cat() as defensive
programming to ensure no uninitialized data even if validation is
bypassed.

Reported-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=d80abb5b890d39261e72
Fixes: 1da177e4c3 ("Linux-2.6.12-rc2")
Tested-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
Tested-by: Viacheslav Dubeyko <slava@dubeyko.com>
Suggested-by: Charalampos Mitrodimas <charmitro@posteo.net>
Link: https://lore.kernel.org/all/20260120051114.1281285-1-kartikey406@gmail.com/ [v1]
Link: https://lore.kernel.org/all/20260121063109.1830263-1-kartikey406@gmail.com/ [v2]
Link: https://lore.kernel.org/all/20260212014233.2422046-1-kartikey406@gmail.com/ [v3]
Link: https://lore.kernel.org/all/20260214002100.436125-1-kartikey406@gmail.com/T/ [v4]
Link: https://lore.kernel.org/all/20260221061626.15853-1-kartikey406@gmail.com/T/ [v5]
Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
Link: https://lore.kernel.org/r/20260307010302.41547-1-kartikey406@gmail.com
Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
This commit is contained in:
Deepanshu Kartikey
2026-03-07 06:33:02 +05:30
committed by Viacheslav Dubeyko
parent ee8422d00b
commit b6b592275a
5 changed files with 64 additions and 4 deletions

View File

@@ -287,3 +287,54 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
fd->bnode = bnode;
return res;
}
/**
 * hfsplus_brec_read_cat - read and validate a catalog record
 * @fd: find data structure
 * @entry: pointer to catalog entry to read into
 *
 * Reads a catalog record and validates its size matches the expected
 * size based on the record type.
 *
 * Returns 0 on success, or negative error code on failure.
 */
int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry)
{
	u32 want;
	u16 type;
	int err;

	err = hfs_brec_read(fd, entry, sizeof(hfsplus_cat_entry));
	if (err)
		return err;

	type = be16_to_cpu(entry->type);

	/* Determine the exact on-disk size this record type must have. */
	switch (type) {
	case HFSPLUS_FOLDER:
		want = sizeof(struct hfsplus_cat_folder);
		break;
	case HFSPLUS_FILE:
		want = sizeof(struct hfsplus_cat_file);
		break;
	case HFSPLUS_FOLDER_THREAD:
	case HFSPLUS_FILE_THREAD:
		/*
		 * The fixed part of the thread record must be present
		 * before nodeName.length can be read safely.
		 */
		if (fd->entrylength < HFSPLUS_MIN_THREAD_SZ) {
			pr_err("thread record too short (got %u)\n",
			       fd->entrylength);
			return -EIO;
		}
		want = hfsplus_cat_thread_size(&entry->thread);
		break;
	default:
		pr_err("unknown catalog record type %d\n", type);
		return -EIO;
	}

	if (fd->entrylength != want) {
		pr_err("catalog record size mismatch (type %d, got %u, expected %u)\n",
		       type, fd->entrylength, want);
		return -EIO;
	}

	return 0;
}

View File

@@ -194,12 +194,12 @@ static int hfsplus_fill_cat_thread(struct super_block *sb,
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd)
{
hfsplus_cat_entry tmp;
hfsplus_cat_entry tmp = {0};
int err;
u16 type;
hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
err = hfsplus_brec_read_cat(fd, &tmp);
if (err)
return err;

View File

@@ -49,7 +49,7 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
if (unlikely(err < 0))
goto fail;
again:
err = hfs_brec_read(&fd, &entry, sizeof(entry));
err = hfsplus_brec_read_cat(&fd, &entry);
if (err) {
if (err == -ENOENT) {
hfs_find_exit(&fd);

View File

@@ -516,6 +516,15 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf,
void **data, blk_opf_t opf);
int hfsplus_read_wrapper(struct super_block *sb);
/*
 * Compute the on-disk size of a catalog thread record: the fixed fields
 * up to nodeName, the unistr length field, and the name characters.
 */
static inline u32 hfsplus_cat_thread_size(const struct hfsplus_cat_thread *thread)
{
	u32 name_bytes;

	name_bytes = be16_to_cpu(thread->nodeName.length) * sizeof(hfsplus_unichr);
	return offsetof(struct hfsplus_cat_thread, nodeName) +
	       offsetof(struct hfsplus_unistr, unicode) +
	       name_bytes;
}
int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry);
/*
* time helpers: convert between 1904-base and 1970-base timestamps
*

View File

@@ -571,7 +571,7 @@ static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
if (unlikely(err < 0))
goto out_put_root;
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
if (!hfsplus_brec_read_cat(&fd, &entry)) {
hfs_find_exit(&fd);
if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
err = -EIO;