Merge branch 'akpm' (patches from Andrew)

Pull updates from Andrew Morton:
 "Most of -mm and quite a number of other subsystems: hotfixes, scripts,
  ocfs2, misc, lib, binfmt, init, reiserfs, exec, dma-mapping, kcov.

  MM is fairly quiet this time.  Holidays, I assume"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (118 commits)
  kcov: ignore fault-inject and stacktrace
  include/linux/io-mapping.h-mapping: use PHYS_PFN() macro in io_mapping_map_atomic_wc()
  execve: warn if process starts with executable stack
  reiserfs: prevent NULL pointer dereference in reiserfs_insert_item()
  init/main.c: fix misleading "This architecture does not have kernel memory protection" message
  init/main.c: fix quoted value handling in unknown_bootoption
  init/main.c: remove unnecessary repair_env_string in do_initcall_level
  init/main.c: log arguments and environment passed to init
  fs/binfmt_elf.c: coredump: allow process with empty address space to coredump
  fs/binfmt_elf.c: coredump: delete duplicated overflow check
  fs/binfmt_elf.c: coredump: allocate core ELF header on stack
  fs/binfmt_elf.c: make BAD_ADDR() unlikely
  fs/binfmt_elf.c: better codegen around current->mm
  fs/binfmt_elf.c: don't copy ELF header around
  fs/binfmt_elf.c: fix ->start_code calculation
  fs/binfmt_elf.c: smaller code generation around auxv vector fill
  lib/find_bit.c: uninline helper _find_next_bit()
  lib/find_bit.c: join _find_next_bit{_le}
  uapi: rename ext2_swab() to swab() and share globally in swab.h
  lib/scatterlist.c: adjust indentation in __sg_alloc_table
  ...
Author: Linus Torvalds
Date:   2020-01-31 12:16:36 -08:00

136 changed files with 2722 additions and 1291 deletions

include/linux/backing-dev.h

@@ -13,6 +13,7 @@
 #include <linux/fs.h>
 #include <linux/sched.h>
 #include <linux/blkdev.h>
+#include <linux/device.h>
 #include <linux/writeback.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
@@ -504,4 +505,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
				  (1 << WB_async_congested));
 }
 
+extern const char *bdi_unknown_name;
+
+static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
+{
+	if (!bdi || !bdi->dev)
+		return bdi_unknown_name;
+	return dev_name(bdi->dev);
+}
+
 #endif	/* _LINUX_BACKING_DEV_H */
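
The new bdi_dev_name() helper centralizes the NULL checks that callers previously open-coded around dev_name(bdi->dev). A minimal caller sketch (the function below is hypothetical, not part of the patch):

	/* Hypothetical caller: safe even when bdi->dev is not (yet)
	 * registered, in which case bdi_dev_name() returns
	 * bdi_unknown_name instead of dereferencing a NULL device. */
	static void report_bdi(struct backing_dev_info *bdi)
	{
		pr_info("writeback device: %s\n", bdi_dev_name(bdi));
	}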

include/linux/bitops.h

@@ -13,6 +13,7 @@
 #define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);

include/linux/fs.h

@@ -2737,7 +2737,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
 extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
				   loff_t lend);
-extern int filemap_write_and_wait(struct address_space *mapping);
 extern int filemap_write_and_wait_range(struct address_space *mapping,
					loff_t lstart, loff_t lend);
 extern int __filemap_fdatawrite_range(struct address_space *mapping,
@@ -2747,6 +2746,11 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
 extern int filemap_check_errors(struct address_space *mapping);
 extern void __filemap_set_wb_err(struct address_space *mapping, int err);
 
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
 extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
					     loff_t lend);
 extern int __must_check file_check_and_advance_wb_err(struct file *file);

include/linux/io-mapping.h

@@ -28,6 +28,7 @@ struct io_mapping {
 #ifdef CONFIG_HAVE_ATOMIC_IOMAP
 
+#include <linux/pfn.h>
 #include <asm/iomap.h>
 
 /*
  * For small address space machines, mapping large objects
@@ -64,12 +65,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
 {
	resource_size_t phys_addr;
-	unsigned long pfn;
 
	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
-	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-	return iomap_atomic_prot_pfn(pfn, mapping->prot);
+	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void

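For reference, PHYS_PFN() from <linux/pfn.h> performs the same shift the removed open-coded lines did:

	#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
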
include/linux/memblock.h

@@ -113,6 +113,9 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
 int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
 void memblock_trim_memory(phys_addr_t align);
 bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
@@ -127,10 +130,6 @@ void reset_node_managed_pages(pg_data_t *pgdat);
 void reset_all_zones_managed_pages(void);
 
 /* Low level functions */
-int memblock_add_range(struct memblock_type *type,
-		       phys_addr_t base, phys_addr_t size,
-		       int nid, enum memblock_flags flags);
-
 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,

include/linux/memory.h

@@ -29,8 +29,6 @@ struct memory_block {
	int section_count;		/* serialized by mem_sysfs_mutex */
	int online_type;		/* for passing data to online routine */
	int phys_device;		/* to which fru does this belong? */
-	void *hw;			/* optional pointer to fw/hw data */
-	int (*phys_callback)(struct memory_block *);
	struct device dev;
	int nid;			/* NID for this memory block */
 };
@@ -55,19 +53,6 @@
	int status_change_nid;
 };
 
-/*
- * During pageblock isolation, count the number of pages within the
- * range [start_pfn, start_pfn + nr_pages) which are owned by code
- * in the notifier chain.
- */
-#define MEM_ISOLATE_COUNT	(1<<0)
-
-struct memory_isolate_notify {
-	unsigned long start_pfn;	/* Start of range to check */
-	unsigned int nr_pages;		/* # pages in range to check */
-	unsigned int pages_found;	/* # pages owned found by callbacks */
-};
-
 struct notifier_block;
 struct mem_section;
@@ -94,27 +79,13 @@ static inline int memory_notify(unsigned long val, void *v)
 {
	return 0;
 }
-static inline int register_memory_isolate_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
-}
-static inline int memory_isolate_notify(unsigned long val, void *v)
-{
-	return 0;
-}
 #else
 extern int register_memory_notifier(struct notifier_block *nb);
 extern void unregister_memory_notifier(struct notifier_block *nb);
-extern int register_memory_isolate_notifier(struct notifier_block *nb);
-extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
 int create_memory_block_devices(unsigned long start, unsigned long size);
 void remove_memory_block_devices(unsigned long start, unsigned long size);
 extern void memory_dev_init(void);
 extern int memory_notify(unsigned long val, void *v);
-extern int memory_isolate_notify(unsigned long val, void *v);
 extern struct memory_block *find_memory_block(struct mem_section *);
 typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
 extern int walk_memory_blocks(unsigned long start, unsigned long size,

include/linux/memory_hotplug.h

@@ -94,7 +94,8 @@ extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
 extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+			int online_type, int nid);
 extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
				unsigned long *valid_start, unsigned long *valid_end);
 extern unsigned long __offline_isolated_pages(unsigned long start_pfn,

include/linux/mm.h

@@ -70,11 +70,6 @@ static inline void totalram_pages_add(long count)
	atomic_long_add(count, &_totalram_pages);
 }
 
-static inline void totalram_pages_set(long val)
-{
-	atomic_long_set(&_totalram_pages, val);
-}
-
 extern void * high_memory;
 extern int page_cluster;
@@ -916,10 +911,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#endif
-
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
@@ -947,9 +938,10 @@ static inline bool is_zone_device_page(const struct page *page)
 #endif
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
-void __put_devmap_managed_page(struct page *page);
+void free_devmap_managed_page(struct page *page);
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool put_devmap_managed_page(struct page *page)
+
+static inline bool page_is_devmap_managed(struct page *page)
 {
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
@@ -958,7 +950,6 @@ static inline bool put_devmap_managed_page(struct page *page)
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
-		__put_devmap_managed_page(page);
		return true;
	default:
		break;
@@ -966,11 +957,17 @@ static inline bool put_devmap_managed_page(struct page *page)
	return false;
 }
 
+void put_devmap_managed_page(struct page *page);
+
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool page_is_devmap_managed(struct page *page)
 {
	return false;
 }
+
+static inline void put_devmap_managed_page(struct page *page)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static inline bool is_device_private_page(const struct page *page)
@@ -1023,37 +1020,37 @@ static inline void put_page(struct page *page)
	 * need to inform the device driver through callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
-	if (put_devmap_managed_page(page))
+	if (page_is_devmap_managed(page)) {
+		put_devmap_managed_page(page);
		return;
+	}
 
	if (put_page_testzero(page))
		__put_page(page);
 }
 
 /**
- * put_user_page() - release a gup-pinned page
+ * unpin_user_page() - release a gup-pinned page
  * @page: pointer to page to be released
  *
- * Pages that were pinned via get_user_pages*() must be released via
- * either put_user_page(), or one of the put_user_pages*() routines
- * below. This is so that eventually, pages that are pinned via
- * get_user_pages*() can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special
- * handling.
+ * Pages that were pinned via pin_user_pages*() must be released via either
+ * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
+ * that eventually such pages can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special handling.
  *
- * put_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. put_user_page() calls must
- * be perfectly matched up with get_user_page() calls.
+ * unpin_user_page() and put_page() are not interchangeable, despite this early
+ * implementation that makes them look the same. unpin_user_page() calls must
+ * be perfectly matched up with pin*() calls.
  */
-static inline void put_user_page(struct page *page)
+static inline void unpin_user_page(struct page *page)
 {
	put_page(page);
 }
 
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
-			       bool make_dirty);
+void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+				 bool make_dirty);
 
-void put_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_pages(struct page **pages, unsigned long npages);
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define SECTION_IN_PAGE_FLAGS
@@ -1501,9 +1498,16 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
+long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+			   unsigned long start, unsigned long nr_pages,
+			   unsigned int gup_flags, struct page **pages,
+			   struct vm_area_struct **vmas, int *locked);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
+long pin_user_pages(unsigned long start, unsigned long nr_pages,
+		    unsigned int gup_flags, struct page **pages,
+		    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1511,6 +1515,8 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages);
 
 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
@@ -2575,13 +2581,15 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_ANON	0x8000	/* don't do file mappings */
 #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
 #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
+#define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
 
 /*
- * NOTE on FOLL_LONGTERM:
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
  *
  * FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control. This is contrasted with
- * iov_iter_get_pages() where usages which are transient.
+ * period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
  *
  * FIXME: For pages which are part of a filesystem, mappings are subject to the
  * lifetime enforced by the filesystem and we need guarantees that longterm
@@ -2596,11 +2604,39 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
  * Currently only get_user_pages() and get_user_pages_fast() support this flag
  * and calls to get_user_pages_[un]locked are specifically not allowed. This
  * is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY
+ * FAULT_FLAG_ALLOW_RETRY.
  *
- * In the CMA case: longterm pins in a CMA region would unnecessarily fragment
- * that region. And so CMA attempts to migrate the page before pinning when
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
  * FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/vm/pin_user_pages.rst for more information.
  */
 
 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)

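A minimal sketch of the acquire/release pairing described in the comment block above (the function below is hypothetical; error handling trimmed):

	/* Sketch: pin user memory for data access, then release it with
	 * the matching unpin call. pin_user_pages_fast() sets FOLL_PIN
	 * internally; callers must not pass FOLL_PIN themselves. */
	static int touch_user_buffer(unsigned long start, int nr_pages)
	{
		struct page **pages;
		int pinned;

		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;

		pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
		if (pinned > 0) {
			/* ... read or write page contents (e.g. Direct IO) ... */
			unpin_user_pages(pages, pinned);
		}

		kfree(pages);
		return pinned < 0 ? pinned : 0;
	}
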
include/linux/mmzone.h

@@ -758,7 +758,7 @@ typedef struct pglist_data {
 #ifdef CONFIG_NUMA
	/*
-	 * zone reclaim becomes active if more unmapped pages exist.
+	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;

include/linux/page-isolation.h

@@ -33,8 +33,8 @@ static inline bool is_migrate_isolate(int migratetype)
 #define MEMORY_OFFLINE	0x1
 #define REPORT_FAILURE	0x2
 
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 int migratetype, int flags);
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+				 int migratetype, int flags);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
			 int migratetype, int *num_movable);

include/linux/swab.h

@@ -7,6 +7,7 @@
 # define swab16 __swab16
 # define swab32 __swab32
 # define swab64 __swab64
+# define swab __swab
 # define swahw32 __swahw32
 # define swahb32 __swahb32
 # define swab16p __swab16p

include/linux/thermal.h

@@ -32,17 +32,6 @@
 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */
 #define THERMAL_TEMP_INVALID	-274000
 
-/* Unit conversion macros */
-#define DECI_KELVIN_TO_CELSIUS(t)	({			\
-	long _t = (t);						\
-	((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10);	\
-})
-#define CELSIUS_TO_DECI_KELVIN(t)	((t)*10+2732)
-#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
-#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
-#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
-#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
-
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
 #define DEFAULT_THERMAL_GOVERNOR       "step_wise"

include/linux/units.h (new file, 84 lines)

@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNITS_H
+#define _LINUX_UNITS_H
+
+#include <linux/kernel.h>
+
+#define ABSOLUTE_ZERO_MILLICELSIUS -273150
+
+static inline long milli_kelvin_to_millicelsius(long t)
+{
+	return t + ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+static inline long millicelsius_to_milli_kelvin(long t)
+{
+	return t - ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+#define MILLIDEGREE_PER_DEGREE 1000
+#define MILLIDEGREE_PER_DECIDEGREE 100
+
+static inline long kelvin_to_millicelsius(long t)
+{
+	return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long millicelsius_to_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long deci_kelvin_to_celsius(long t)
+{
+	t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_deci_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t * MILLIDEGREE_PER_DEGREE);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+/**
+ * deci_kelvin_to_millicelsius_with_offset - convert Kelvin to Celsius
+ * @t: temperature value in decidegrees Kelvin
+ * @offset: difference between Kelvin and Celsius in millidegrees
+ *
+ * Return: temperature value in millidegrees Celsius
+ */
+static inline long deci_kelvin_to_millicelsius_with_offset(long t, long offset)
+{
+	return t * MILLIDEGREE_PER_DECIDEGREE - offset;
+}
+
+static inline long deci_kelvin_to_millicelsius(long t)
+{
+	return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long millicelsius_to_deci_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long kelvin_to_celsius(long t)
+{
+	return t + DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+				     MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_kelvin(long t)
+{
+	return t - DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+				     MILLIDEGREE_PER_DEGREE);
+}
+
+#endif /* _LINUX_UNITS_H */
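
Worked example for the decidegree helpers above (the input value is chosen for illustration): 3002 dK is 300.2 K, i.e. 27.05 degrees Celsius, which rounds to 27:

	/* deci_kelvin_to_celsius(3002):
	 *   3002 * MILLIDEGREE_PER_DECIDEGREE     = 300200  (milli-Kelvin)
	 *   300200 + ABSOLUTE_ZERO_MILLICELSIUS   =  27050  (millicelsius)
	 *   DIV_ROUND_CLOSEST(27050, 1000)        =     27  (degrees Celsius)
	 */
	long c = deci_kelvin_to_celsius(3002);	/* c == 27 */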

include/linux/zlib.h

@@ -191,6 +191,12 @@ extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
    exceed those passed here.
 */
 
+extern int zlib_deflate_dfltcc_enabled (void);
+/*
+   Returns 1 if Deflate-Conversion facility is installed and enabled,
+   otherwise 0.
+*/
+
 /*
 extern int deflateInit (z_streamp strm, int level);

include/trace/events/kmem.h

@@ -88,8 +88,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
		__entry->node		= node;
	),
 
-	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
-		__entry->call_site,
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,

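The format change above swaps a raw hex call_site for a symbolized one: %pS resolves an address to "symbol+offset/size" form. Illustrative sketch (the printed address and symbol are made up):

	/* %lx would print e.g. "call_site=ffffffff81234567";
	 * %pS prints e.g. "call_site=kmem_cache_alloc_node+0x1a/0x60". */
	pr_info("call_site=%pS\n", (void *)_RET_IP_);
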
include/trace/events/writeback.h

@@ -67,8 +67,8 @@ DECLARE_EVENT_CLASS(writeback_page_template,
	TP_fast_assign(
		strscpy_pad(__entry->name,
-			    mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
-			    32);
+			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
+					 NULL), 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),
@@ -111,8 +111,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
		struct backing_dev_info *bdi = inode_to_bdi(inode);
 
		/* may be called for files on pseudo FSes w/ unregistered bdi */
-		strscpy_pad(__entry->name,
-			    bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
@@ -193,7 +192,7 @@ TRACE_EVENT(inode_foreign_history,
	),
	TP_fast_assign(
-		strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
+		strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
@@ -222,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs,
	),
	TP_fast_assign(
-		strncpy(__entry->name,	dev_name(old_wb->bdi->dev), 32);
+		strncpy(__entry->name,	bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
@@ -255,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty,
		struct address_space *mapping = page_mapping(page);
		struct inode *inode = mapping ? mapping->host : NULL;
 
-		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
+		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
@@ -288,7 +287,7 @@ TRACE_EVENT(flush_foreign,
	),
	TP_fast_assign(
-		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
+		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
@@ -318,7 +317,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
	TP_fast_assign(
		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
@@ -361,9 +360,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
-		strscpy_pad(__entry->name,
-			    wb->bdi->dev ? dev_name(wb->bdi->dev) :
-			    "(unknown)", 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
@@ -416,7 +413,7 @@ DECLARE_EVENT_CLASS(writeback_class,
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
@@ -438,7 +435,7 @@ TRACE_EVENT(writeback_bdi_register,
		__array(char, name, 32)
	),
	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
@@ -463,7 +460,7 @@ DECLARE_EVENT_CLASS(wbc_class,
	),
	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
@@ -514,7 +511,7 @@ TRACE_EVENT(writeback_queue_io,
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
-		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -600,7 +597,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
	),
	TP_fast_assign(
-		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
@@ -665,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages,
	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
-		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
 
		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
@@ -726,7 +723,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
	TP_fast_assign(
		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
@@ -800,7 +797,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
	TP_fast_assign(
		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;

include/uapi/linux/swab.h

@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <asm/bitsperlong.h>
 #include <asm/swab.h>
 
 /*
@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
		__fswab64(x))
 #endif
 
+static __always_inline unsigned long __swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+	return __swab64(y);
+#else /* BITS_PER_LONG == 32 */
+	return __swab32(y);
+#endif
+}
+
 /**
  * __swahw32 - return a word-swapped 32-bit value
  * @x: value to wordswap

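Quick sanity check of the new word-size dispatch (values illustrative):

	unsigned long a = __swab(0x0102030405060708UL);
	/* BITS_PER_LONG == 64: a == 0x0807060504030201 (via __swab64) */

	unsigned long b = __swab(0x01020304UL);
	/* BITS_PER_LONG == 32: b == 0x04030201 (via __swab32) */
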
include/linux/sysctl.h

@@ -195,7 +195,7 @@ enum
	VM_MIN_UNMAPPED=32,	/* Set min percent of unmapped pages */
	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
-	VM_MIN_SLAB=35,		/* Percent pages ignored by zone reclaim */
+	VM_MIN_SLAB=35,		/* Percent pages ignored by node reclaim */
 };
};