mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge branch 'master' into next
@@ -370,6 +370,11 @@
 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+{0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 {0, 0, 0}

 #define r128_PCI_IDS \
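The new RS880 rows slot into the same pattern as the existing RS780 entries: each row expands to a struct pci_device_id initializer ({ vendor, device, subvendor, subdevice, class, class_mask, driver_data }), with the chip family and feature flags carried in driver_data. A minimal sketch of how a driver consumes such a macro table (assuming the usual radeon_PCI_IDS name; the surrounding driver code is not part of this hunk):

#include <linux/module.h>
#include <linux/pci.h>

/* The xxx_PCI_IDS macros expand to rows of struct pci_device_id;
 * driver_data holds the CHIP_* family and RADEON_* feature flags
 * that the probe path inspects to pick per-chip behaviour. */
static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS	/* expands to the {0x1002, ...} rows above */
};

MODULE_DEVICE_TABLE(pci, pciidlist);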
@@ -1,31 +1,37 @@
 #ifndef DECOMPRESS_GENERIC_H
 #define DECOMPRESS_GENERIC_H

-/* Minimal chunksize to be read.
- *Bzip2 prefers at least 4096
- *Lzma prefers 0x10000 */
-#define COMPR_IOBUF_SIZE 4096
-
 typedef int (*decompress_fn) (unsigned char *inbuf, int len,
 			      int(*fill)(void*, unsigned int),
-			      int(*writebb)(void*, unsigned int),
-			      unsigned char *output,
+			      int(*flush)(void*, unsigned int),
+			      unsigned char *outbuf,
 			      int *posp,
 			      void(*error)(char *x));

 /* inbuf - input buffer
  *len - len of pre-read data in inbuf
- *fill - function to fill inbuf if empty
- *writebb - function to write out outbuf
+ *fill - function to fill inbuf when empty
+ *flush - function to write out outbuf
+ *outbuf - output buffer
  *posp - if non-null, input position (number of bytes read) will be
  *       returned here
  *
- *If len != 0, the inbuf is initialized (with as much data), and fill
- *should not be called
- *If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
- *fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
+ *If len != 0, inbuf should contain all the necessary input data, and fill
+ *should be NULL
+ *If len = 0, inbuf can be NULL, in which case the decompressor will allocate
+ *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
+ *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
+ *bytes should be read per call. Replace XXX with the appropriate decompressor
+ *name, i.e. LZMA_IOBUF_SIZE.
+ *
+ *If flush = NULL, outbuf must be large enough to buffer all the expected
+ *output. If flush != NULL, the output buffer will be allocated by the
+ *decompressor (outbuf = NULL), and the flush function will be called to
+ *flush the output buffer at the appropriate time (decompressor and stream
+ *dependent).
  */


 /* Utility routine to detect the decompression method */
 decompress_fn decompress_method(const unsigned char *inbuf, int len,
 				const char **name);
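Based on the contract documented above, a minimal caller-side sketch (hedged: the my_flush/my_error callbacks and the run_decompress wrapper are invented for illustration; only decompress_fn and decompress_method come from this header):

#include <linux/decompress/generic.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int my_flush(void *buf, unsigned int len)
{
	/* consume len bytes of decompressed output from buf;
	 * returning len signals success */
	return len;
}

static void my_error(char *msg)
{
	pr_err("decompress: %s\n", msg);
}

static int run_decompress(unsigned char *image, int image_len)
{
	const char *name;
	decompress_fn decomp = decompress_method(image, image_len, &name);

	if (!decomp)
		return -EINVAL;

	/* len != 0: image already holds all the input, so fill is NULL.
	 * flush != NULL: the decompressor allocates outbuf itself and
	 * pushes output through my_flush. posp is unused here. */
	return decomp(image, image_len, NULL, my_flush, NULL, NULL, my_error);
}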
@@ -2137,7 +2137,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);

 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);

-extern struct inode * inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
 extern void inode_add_to_lists(struct super_block *, struct inode *);
 extern void iput(struct inode *);
@@ -2164,6 +2164,7 @@ extern void __iget(struct inode * inode);
 extern void iget_failed(struct inode *);
 extern void clear_inode(struct inode *);
 extern void destroy_inode(struct inode *);
+extern void __destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern int should_remove_suid(struct dentry *);
 extern int file_remove_suid(struct file *);
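With inode_init_always() now returning int rather than a struct inode pointer, callers must check for failure and unwind their own allocation. A hedged sketch of the adjusted calling convention (the my_fs_* names and cache are hypothetical):

#include <linux/fs.h>
#include <linux/slab.h>

struct my_fs_inode {			/* hypothetical per-fs inode */
	/* ... filesystem-private fields ... */
	struct inode vfs_inode;
};

static struct kmem_cache *my_fs_inode_cache;	/* hypothetical cache */

static struct inode *my_fs_alloc_inode(struct super_block *sb)
{
	struct my_fs_inode *ip;

	ip = kmem_cache_alloc(my_fs_inode_cache, GFP_KERNEL);
	if (!ip)
		return NULL;

	/* inode_init_always() can now fail; free our allocation
	 * instead of returning a half-initialized inode */
	if (inode_init_always(sb, &ip->vfs_inode)) {
		kmem_cache_free(my_fs_inode_cache, ip);
		return NULL;
	}
	return &ip->vfs_inode;
}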
@@ -89,7 +89,9 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };

+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
@@ -119,11 +121,9 @@ struct ftrace_event_call {
 	void			*filter;
 	void			*mod;

-#ifdef CONFIG_EVENT_PROFILE
-	atomic_t	profile_count;
-	int		(*profile_enable)(struct ftrace_event_call *);
-	void		(*profile_disable)(struct ftrace_event_call *);
-#endif
+	atomic_t	profile_count;
+	int		(*profile_enable)(struct ftrace_event_call *);
+	void		(*profile_disable)(struct ftrace_event_call *);
 };

 #define MAX_FILTER_PRED		32
@@ -15,12 +15,13 @@
 #define KEY_COL(k)		(((k) >> 16) & 0xff)
 #define KEY_VAL(k)		((k) & 0xffff)

+#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))
+
 /**
  * struct matrix_keymap_data - keymap for matrix keyboards
  * @keymap: pointer to array of uint32 values encoded with KEY() macro
  *	representing keymap
  * @keymap_size: number of entries (initialized) in this keymap
- * @max_keymap_size: maximum size of keymap supported by the device
  *
  * This structure is supposed to be used by platform code to supply
  * keymaps to drivers that implement matrix-like keypads/keyboards.
@@ -28,14 +29,13 @@
 struct matrix_keymap_data {
 	const uint32_t *keymap;
 	unsigned int	keymap_size;
-	unsigned int	max_keymap_size;
 };

 /**
  * struct matrix_keypad_platform_data - platform-dependent keypad data
  * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: array of gpio numbers reporesenting rows
- * @col_gpios: array of gpio numbers reporesenting colums
+ * @row_gpios: pointer to array of gpio numbers representing rows
+ * @col_gpios: pointer to array of gpio numbers representing columns
  * @num_row_gpios: actual number of row gpios used by device
  * @num_col_gpios: actual number of col gpios used by device
  * @col_scan_delay_us: delay, measured in microseconds, that is
@@ -48,8 +48,9 @@ struct matrix_keymap_data {
 struct matrix_keypad_platform_data {
 	const struct matrix_keymap_data *keymap_data;

-	unsigned int	row_gpios[MATRIX_MAX_ROWS];
-	unsigned int	col_gpios[MATRIX_MAX_COLS];
+	const unsigned int *row_gpios;
+	const unsigned int *col_gpios;
+
 	unsigned int	num_row_gpios;
 	unsigned int	num_col_gpios;
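Taken together, these three hunks drop the max_keymap_size bound and switch row_gpios/col_gpios from fixed-size arrays to pointers, so board code now supplies exactly the rows and columns it has. A hedged sketch of platform data under the new layout (GPIO numbers and key choices are invented):

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/kernel.h>

/* Each keymap entry packs (row, col, keycode) via the KEY() macro. */
static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_A),	/* row 0, column 0 */
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_ENTER),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static const unsigned int board_row_gpios[] = { 88, 89 };
static const unsigned int board_col_gpios[] = { 90, 91 };

static struct matrix_keypad_platform_data board_keypad_data = {
	.keymap_data	= &board_keymap_data,
	.row_gpios	= board_row_gpios,
	.col_gpios	= board_col_gpios,
	.num_row_gpios	= ARRAY_SIZE(board_row_gpios),
	.num_col_gpios	= ARRAY_SIZE(board_col_gpios),
};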
@@ -110,6 +110,7 @@ struct kvm_memory_slot {

 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
+	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
 		   struct kvm *kvm, int level);
 	union {
@@ -251,7 +251,7 @@ struct mtd_info {

 static inline struct mtd_info *dev_to_mtd(struct device *dev)
 {
-	return dev ? container_of(dev, struct mtd_info, dev) : NULL;
+	return dev ? dev_get_drvdata(dev) : NULL;
 }

 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -47,6 +47,8 @@ struct mtd_partition {
 #define MTDPART_SIZ_FULL	(0)


+struct mtd_info;
+
 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
 int del_mtd_partitions(struct mtd_info *);
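The forward declaration lets this header be used without pulling in <linux/mtd/mtd.h> first. For context, a hedged sketch of a typical use of the two functions declared here (names and sizes are illustrative):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Describe the flash layout, then register it against the master
 * mtd_info the driver probed. */
static struct mtd_partition board_parts[] = {
	{
		.name	= "bootloader",
		.offset	= 0,
		.size	= 256 * 1024,
	},
	{
		.name	= "rootfs",
		.offset	= MTDPART_OFS_APPEND,	/* right after the previous one */
		.size	= MTDPART_SIZ_FULL,	/* rest of the device */
	},
};

static int board_flash_register(struct mtd_info *master)
{
	return add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
}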
@@ -82,6 +82,12 @@
 * to generate slightly worse code. So use a simple one-line #define
 * for node_isset(), instead of wrapping an inline inside a macro, the
 * way we do the other calls.
+ *
+ * NODEMASK_SCRATCH
+ * When doing above logical AND, OR, XOR, Remap operations the callers tend to
+ * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
+ * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
+ * for such situations. See below and CPUMASK_ALLOC also.
 */

 #include <linux/kernel.h>
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
 #define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
 #define for_each_online_node(node) for_each_node_state(node, N_ONLINE)

+/*
+ * For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h)
+ */
+
+#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
+#define NODEMASK_ALLOC(x, m)	struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
+#define NODEMASK_FREE(m)	kfree(m)
+#else
+#define NODEMASK_ALLOC(x, m)	struct x _m, *m = &_m
+#define NODEMASK_FREE(m)
+#endif
+
+/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
+struct nodemask_scratch {
+	nodemask_t	mask1;
+	nodemask_t	mask2;
+};
+
+#define NODEMASK_SCRATCH(x)	  NODEMASK_ALLOC(nodemask_scratch, x)
+#define NODEMASK_SCRATCH_FREE(x)  NODEMASK_FREE(x)
+
+
 #endif /* __LINUX_NODEMASK_H */
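A short sketch of the pattern NODEMASK_SCRATCH is meant to support, per the comment above: callers needing temporary nodemasks get heap storage when nodemask_t is large and stack storage otherwise, behind one allocation/free pair (the function and the OR operation are illustrative):

#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

static int merge_two_nodemasks(const nodemask_t *a, const nodemask_t *b,
			       nodemask_t *result)
{
	NODEMASK_SCRATCH(scratch);	/* kmalloc'd or on-stack, see above */

	/* NULL is only possible in the kmalloc case */
	if (!scratch)
		return -ENOMEM;

	nodes_or(scratch->mask1, *a, *b);	/* temporary union */
	*result = scratch->mask1;

	NODEMASK_SCRATCH_FREE(scratch);
	return 0;
}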
@@ -121,8 +121,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
+	PERF_SAMPLE_RAW				= 1U << 10,

-	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
 };
@@ -368,6 +369,8 @@ enum perf_event_type {
 	 *
 	 *	{ u64			nr,
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
+	 *	{ u32			size;
+	 *	  char			data[size]; } && PERF_SAMPLE_RAW
 	 * };
 	 */
 	PERF_EVENT_SAMPLE		= 9,
@@ -413,6 +416,11 @@ struct perf_callchain_entry {
 	__u64	ip[PERF_MAX_STACK_DEPTH];
 };

+struct perf_raw_record {
+	u32			size;
+	void			*data;
+};
+
 struct task_struct;
@@ -681,6 +689,7 @@ struct perf_sample_data {
 	struct pt_regs		*regs;
 	u64			addr;
 	u64			period;
+	struct perf_raw_record	*raw;
 };

 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
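These four hunks together add the raw-sample plumbing: a PERF_SAMPLE_RAW bit, the documented { u32 size; char data[size]; } record layout, the perf_raw_record carrier, and a raw pointer in perf_sample_data. A hedged sketch of a producer wiring a payload through it (the function and payload are invented; the 3-argument perf_counter_overflow matches the declaration above):

#include <linux/perf_counter.h>

static void emit_raw_sample(struct perf_counter *counter,
			    void *payload, u32 payload_size,
			    struct pt_regs *regs)
{
	struct perf_raw_record raw = {
		.size	= payload_size,
		.data	= payload,
	};
	struct perf_sample_data data = {
		.regs	= regs,
		.addr	= 0,
		.raw	= &raw,
	};

	/* the raw bytes reach userspace only if the counter asked for
	 * PERF_SAMPLE_RAW in its sample_type */
	perf_counter_overflow(counter, 0 /* !nmi */, &data);
}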
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args

+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call(			\

 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	int ret = 0;
+ *
+ *	if (!atomic_inc_return(&event_call->profile_count))
+ *		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	if (atomic_add_negative(-1, &event_call->profile_count))
+ *		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
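The enable/disable pair generated above relies on profile_count starting at -1 (see the _TRACE_PROFILE_INIT initializer further down in this patch). A standalone sketch of just that counting trick, outside the macro machinery:

#include <asm/atomic.h>

/* The counter starts at -1, so the first enable makes
 * atomic_inc_return() yield 0 and registration happens exactly once;
 * atomic_add_negative() fires only when the last user drops away. */
static atomic_t profile_count = ATOMIC_INIT(-1);

static void profile_enable_sketch(void)
{
	if (!atomic_inc_return(&profile_count))
		; /* first user: register the tracepoint callback here */
}

static void profile_disable_sketch(void)
{
	if (atomic_add_negative(-1, &profile_count))
		; /* last user went away: unregister the callback here */
}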
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call(			\
 #define TP_FMT(fmt, args...)	fmt "\n", ##args

 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
-
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,

 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\

 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +615,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \

 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	// Below we want to get the aligned size by taking into account
+ *	// the u32 field that will later store the buffer size
+ *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *			     sizeof(u64));
+ *	__entry_size -= sizeof(u32);
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample in the stack
+ *		struct trace_entry *ent;
+ *
+ *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *
+ *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- do some jobs with dynamic arrays
+ *
+ *		<assign>  <- affect our values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size); <- submit them to perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			     sizeof(u64));				\
+	__entry_size -= sizeof(u32);					\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+			     __entry_size);				\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT