Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpiosxf.h              |   4
-rw-r--r--  include/drm/drm_pciids.h             |  10
-rw-r--r--  include/linux/blkdev.h               |   1
-rw-r--r--  include/linux/clocksource.h          | 122
-rw-r--r--  include/linux/decompress/generic.h   |  32
-rw-r--r--  include/linux/fs.h                   |   3
-rw-r--r--  include/linux/ftrace_event.h         |  12
-rw-r--r--  include/linux/hrtimer.h              |   2
-rw-r--r--  include/linux/inetdevice.h           |   2
-rw-r--r--  include/linux/input/matrix_keypad.h  |  13
-rw-r--r--  include/linux/kvm_host.h             |   1
-rw-r--r--  include/linux/mtd/mtd.h              |   2
-rw-r--r--  include/linux/mtd/partitions.h       |   2
-rw-r--r--  include/linux/nfs_fs.h               |   5
-rw-r--r--  include/linux/nodemask.h             |  28
-rw-r--r--  include/linux/perf_counter.h         |  73
-rw-r--r--  include/linux/time.h                 |   8
-rw-r--r--  include/linux/tty_ldisc.h            |   2
-rw-r--r--  include/linux/wait.h                 |   9
-rw-r--r--  include/net/bluetooth/rfcomm.h       |  12
-rw-r--r--  include/net/cfg80211.h               |   5
-rw-r--r--  include/trace/ftrace.h               | 183
22 files changed, 345 insertions, 186 deletions
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 3e798593b17b..ab0b85cf21f3 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -242,6 +242,10 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
acpi_status acpi_os_validate_interface(char *interface);
acpi_status acpi_osi_invalidate(char* interface);
+acpi_status
+acpi_os_validate_address(u8 space_id, acpi_physical_address address,
+ acpi_size length, char *name);
+
u64 acpi_os_get_timer(void);
acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 7174818c2c13..853508499d20 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -257,9 +257,12 @@
{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
@@ -288,6 +291,7 @@
{0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
@@ -325,6 +329,7 @@
{0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -365,6 +370,11 @@
{0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e7cb5dbf6c26..69103e053c92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -913,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 1219be4fb42e..83d2fbd81b93 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
+#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -148,14 +149,11 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @disable: optional function to disable the clocksource
* @mask: bitmask for two's complement
* subtraction of non 64 bit counters
- * @mult: cycle to nanosecond multiplier (adjusted by NTP)
- * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP)
+ * @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
* @flags: flags describing special properties
* @vread: vsyscall based read
* @resume: resume function for the clocksource, if necessary
- * @cycle_interval: Used internally by timekeeping core, please ignore.
- * @xtime_interval: Used internally by timekeeping core, please ignore.
*/
struct clocksource {
/*
@@ -169,7 +167,6 @@ struct clocksource {
void (*disable)(struct clocksource *cs);
cycle_t mask;
u32 mult;
- u32 mult_orig;
u32 shift;
unsigned long flags;
cycle_t (*vread)(void);
@@ -181,19 +178,12 @@ struct clocksource {
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0)
#endif
- /* timekeeping specific data, ignore */
- cycle_t cycle_interval;
- u64 xtime_interval;
- u32 raw_interval;
/*
* Second part is written at each timer interrupt
* Keep it in a different cache line to dirty no
* more than one cache line.
*/
cycle_t cycle_last ____cacheline_aligned_in_smp;
- u64 xtime_nsec;
- s64 error;
- struct timespec raw_time;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
@@ -202,8 +192,6 @@ struct clocksource {
#endif
};
-extern struct clocksource *clock; /* current clocksource */
-
/*
* Clock source flags bits::
*/
@@ -212,6 +200,7 @@ extern struct clocksource *clock; /* current clocksource */
#define CLOCK_SOURCE_WATCHDOG 0x10
#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
+#define CLOCK_SOURCE_UNSTABLE 0x40
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
@@ -268,108 +257,15 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
}
/**
- * clocksource_read: - Access the clocksource's current cycle value
- * @cs: pointer to clocksource being read
- *
- * Uses the clocksource to return the current cycle_t value
- */
-static inline cycle_t clocksource_read(struct clocksource *cs)
-{
- return cs->read(cs);
-}
-
-/**
- * clocksource_enable: - enable clocksource
- * @cs: pointer to clocksource
- *
- * Enables the specified clocksource. The clocksource callback
- * function should start up the hardware and setup mult and field
- * members of struct clocksource to reflect hardware capabilities.
- */
-static inline int clocksource_enable(struct clocksource *cs)
-{
- int ret = 0;
-
- if (cs->enable)
- ret = cs->enable(cs);
-
- /*
- * The frequency may have changed while the clocksource
- * was disabled. If so the code in ->enable() must update
- * the mult value to reflect the new frequency. Make sure
- * mult_orig follows this change.
- */
- cs->mult_orig = cs->mult;
-
- return ret;
-}
-
-/**
- * clocksource_disable: - disable clocksource
- * @cs: pointer to clocksource
- *
- * Disables the specified clocksource. The clocksource callback
- * function should power down the now unused hardware block to
- * save power.
- */
-static inline void clocksource_disable(struct clocksource *cs)
-{
- /*
- * Save mult_orig in mult so clocksource_enable() can
- * restore the value regardless if ->enable() updates
- * the value of mult or not.
- */
- cs->mult = cs->mult_orig;
-
- if (cs->disable)
- cs->disable(cs);
-}
-
-/**
- * cyc2ns - converts clocksource cycles to nanoseconds
- * @cs: Pointer to clocksource
- * @cycles: Cycles
+ * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
*
- * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds.
+ * Converts cycles to nanoseconds, using the given mult and shift.
*
* XXX - This could use some mult_lxl_ll() asm optimization
*/
-static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
-{
- u64 ret = (u64)cycles;
- ret = (ret * cs->mult) >> cs->shift;
- return ret;
-}
-
-/**
- * clocksource_calculate_interval - Calculates a clocksource interval struct
- *
- * @c: Pointer to clocksource.
- * @length_nsec: Desired interval length in nanoseconds.
- *
- * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
- * pair and interval request.
- *
- * Unless you're the timekeeping code, you should not be using this!
- */
-static inline void clocksource_calculate_interval(struct clocksource *c,
- unsigned long length_nsec)
+static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
- u64 tmp;
-
- /* Do the ns -> cycle conversion first, using original mult */
- tmp = length_nsec;
- tmp <<= c->shift;
- tmp += c->mult_orig/2;
- do_div(tmp, c->mult_orig);
-
- c->cycle_interval = (cycle_t)tmp;
- if (c->cycle_interval == 0)
- c->cycle_interval = 1;
-
- /* Go back from cycles -> shifted ns, this time use ntp adjused mult */
- c->xtime_interval = (u64)c->cycle_interval * c->mult;
- c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
+ return ((u64) cycles * mult) >> shift;
}
@@ -380,6 +276,8 @@ extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);
+extern struct clocksource * __init __weak clocksource_default_clock(void);
+extern void clocksource_mark_unstable(struct clocksource *cs);
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
@@ -394,4 +292,6 @@ static inline void update_vsyscall_tz(void)
}
#endif
+extern void timekeeping_notify(struct clocksource *clock);
+
#endif /* _LINUX_CLOCKSOURCE_H */
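The clocksource.h changes above drop the struct-based cyc2ns() and clocksource_calculate_interval() helpers in favour of the stateless clocksource_cyc2ns(). A minimal sketch (not part of the patch) of how a caller might convert a raw cycle delta with the new helper, assuming an already-registered clocksource cs and two readings taken by the caller:

#include <linux/clocksource.h>

/* Sketch only: convert a cycle delta using the new mult/shift helper. */
static s64 example_delta_ns(struct clocksource *cs, cycle_t start, cycle_t end)
{
	/* Two's complement subtraction masked for counters narrower than 64 bits. */
	cycle_t delta = (end - start) & cs->mask;

	return clocksource_cyc2ns(delta, cs->mult, cs->shift);
}

Masking with cs->mask handles counter wrap exactly as the old cyc2ns() path did; the only difference is that mult and shift are now passed explicitly.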
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
index 6dfb856327bb..0c7111a55a1a 100644
--- a/include/linux/decompress/generic.h
+++ b/include/linux/decompress/generic.h
@@ -1,31 +1,37 @@
#ifndef DECOMPRESS_GENERIC_H
#define DECOMPRESS_GENERIC_H
-/* Minimal chunksize to be read.
- *Bzip2 prefers at least 4096
- *Lzma prefers 0x10000 */
-#define COMPR_IOBUF_SIZE 4096
-
typedef int (*decompress_fn) (unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
- int(*writebb)(void*, unsigned int),
- unsigned char *output,
+ int(*flush)(void*, unsigned int),
+ unsigned char *outbuf,
int *posp,
void(*error)(char *x));
/* inbuf - input buffer
*len - len of pre-read data in inbuf
- *fill - function to fill inbuf if empty
- *writebb - function to write out outbug
+ *fill - function to fill inbuf when empty
+ *flush - function to write out outbuf
+ *outbuf - output buffer
*posp - if non-null, input position (number of bytes read) will be
* returned here
*
- *If len != 0, the inbuf is initialized (with as much data), and fill
- *should not be called
- *If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
- *fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
+ *If len != 0, inbuf should contain all the necessary input data, and fill
+ *should be NULL
+ *If len = 0, inbuf can be NULL, in which case the decompressor will allocate
+ *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
+ *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
+ *bytes should be read per call. Replace XXX with the appropriate decompressor
+ *name, i.e. LZMA_IOBUF_SIZE.
+ *
+ *If flush = NULL, outbuf must be large enough to buffer all the expected
+ *output. If flush != NULL, the output buffer will be allocated by the
+ *decompressor (outbuf = NULL), and the flush function will be called to
+ *flush the output buffer at the appropriate time (decompressor and stream
+ *dependent).
*/
+
/* Utility routine to detect the decompression method */
decompress_fn decompress_method(const unsigned char *inbuf, int len,
const char **name);
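To make the fill/flush contract above concrete, here is a minimal sketch of a caller that already has the whole compressed image in memory. my_flush() and my_error() are hypothetical callbacks supplied by the caller, not part of this patch:

#include <linux/decompress/generic.h>

static int my_flush(void *data, unsigned int size);	/* hypothetical output consumer */
static void my_error(char *msg);			/* hypothetical error hook */

static int my_decompress(unsigned char *buf, int len)
{
	const char *name;
	decompress_fn decomp = decompress_method(buf, len, &name);

	if (!decomp)
		return -1;	/* no known compression signature */

	/*
	 * Whole image is already in buf, so fill is NULL; with a non-NULL
	 * flush the decompressor allocates its own output buffer.
	 */
	return decomp(buf, len, NULL, my_flush, NULL, NULL, my_error);
}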
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a36ffa5a77a4..67888a9e0655 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2137,7 +2137,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
-extern struct inode * inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void inode_add_to_lists(struct super_block *, struct inode *);
extern void iput(struct inode *);
@@ -2164,6 +2164,7 @@ extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void destroy_inode(struct inode *);
+extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int should_remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5c093ffc655b..a81170de7f6b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned long flags,
+ int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
unsigned long flags, int pc);
@@ -119,11 +121,9 @@ struct ftrace_event_call {
void *filter;
void *mod;
-#ifdef CONFIG_EVENT_PROFILE
- atomic_t profile_count;
- int (*profile_enable)(struct ftrace_event_call *);
- void (*profile_disable)(struct ftrace_event_call *);
-#endif
+ atomic_t profile_count;
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
};
#define MAX_FILTER_PRED 32
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 4759917adc71..ff037f0b1b4e 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -91,7 +91,6 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
- * @cb_entry: list head to enqueue an expired timer into the callback list
* @start_site: timer statistics field to store the site where the timer
* was started
* @start_comm: timer statistics field to store the name of the process which
@@ -108,7 +107,6 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
- struct list_head cb_entry;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index acef2a770b6b..ad27c7da8798 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
-#define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER)
+#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 7964516c6954..15d5903af2dd 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -15,12 +15,13 @@
#define KEY_COL(k) (((k) >> 16) & 0xff)
#define KEY_VAL(k) ((k) & 0xffff)
+#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))
+
/**
* struct matrix_keymap_data - keymap for matrix keyboards
* @keymap: pointer to array of uint32 values encoded with KEY() macro
* representing keymap
* @keymap_size: number of entries (initialized) in this keymap
- * @max_keymap_size: maximum size of keymap supported by the device
*
* This structure is supposed to be used by platform code to supply
* keymaps to drivers that implement matrix-like keypads/keyboards.
@@ -28,14 +29,13 @@
struct matrix_keymap_data {
const uint32_t *keymap;
unsigned int keymap_size;
- unsigned int max_keymap_size;
};
/**
* struct matrix_keypad_platform_data - platform-dependent keypad data
* @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: array of gpio numbers reporesenting rows
- * @col_gpios: array of gpio numbers reporesenting colums
+ * @row_gpios: pointer to array of gpio numbers representing rows
+ * @col_gpios: pointer to array of gpio numbers reporesenting colums
* @num_row_gpios: actual number of row gpios used by device
* @num_col_gpios: actual number of col gpios used by device
* @col_scan_delay_us: delay, measured in microseconds, that is
@@ -48,8 +48,9 @@ struct matrix_keymap_data {
struct matrix_keypad_platform_data {
const struct matrix_keymap_data *keymap_data;
- unsigned int row_gpios[MATRIX_MAX_ROWS];
- unsigned int col_gpios[MATRIX_MAX_COLS];
+ const unsigned int *row_gpios;
+ const unsigned int *col_gpios;
+
unsigned int num_row_gpios;
unsigned int num_col_gpios;
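With row_gpios/col_gpios now plain pointers, a board file can supply arrays of whatever length it needs. A sketch with made-up GPIO numbers and a 2x2 keymap, purely for illustration:

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_A),
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_C),
	KEY(1, 1, KEY_D),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static const unsigned int board_row_gpios[] = { 10, 11 };	/* made-up GPIOs */
static const unsigned int board_col_gpios[] = { 20, 21 };

static struct matrix_keypad_platform_data board_keypad_data = {
	.keymap_data	= &board_keymap_data,
	.row_gpios	= board_row_gpios,
	.col_gpios	= board_col_gpios,
	.num_row_gpios	= ARRAY_SIZE(board_row_gpios),
	.num_col_gpios	= ARRAY_SIZE(board_col_gpios),
};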
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 16713dc672e4..3060bdc35ffe 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_memory_slot {
struct kvm_kernel_irq_routing_entry {
u32 gsi;
+ u32 type;
int (*set)(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level);
union {
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 5675b63a0631..0f32a9b6ff55 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -251,7 +251,7 @@ struct mtd_info {
static inline struct mtd_info *dev_to_mtd(struct device *dev)
{
- return dev ? container_of(dev, struct mtd_info, dev) : NULL;
+ return dev ? dev_get_drvdata(dev) : NULL;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index af6dcb992bc3..b70313d33ff8 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -47,6 +47,8 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
+struct mtd_info;
+
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index fdffb413b192..f6b90240dd41 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
-extern void nfs_writedata_release(void *);
/*
* Try to write back everything synchronously (but check the
@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_write_data *wdata);
-extern void nfs_commitdata_release(void *wdata);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)
@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode)
* Allocate nfs_write_data structures
*/
extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
+extern void nfs_writedata_free(struct nfs_write_data *);
/*
* linux/fs/nfs/read.c
@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
-extern void nfs_readdata_release(void *data);
extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
struct page *);
@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
* Allocate nfs_read_data structures
*/
extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
+extern void nfs_readdata_free(struct nfs_read_data *);
/*
* linux/fs/nfs3proc.c
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 829b94b156f2..b359c4a9ec9e 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -82,6 +82,12 @@
* to generate slightly worse code. So use a simple one-line #define
* for node_isset(), instead of wrapping an inline inside a macro, the
* way we do the other calls.
+ *
+ * NODEMASK_SCRATCH
+ * When doing above logical AND, OR, XOR, Remap operations the callers tend to
+ * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
+ * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
+ * for such situations. See below and CPUMASK_ALLOC also.
*/
#include <linux/kernel.h>
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
+/*
+ * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h)
+ */
+
+#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
+#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
+#define NODEMASK_FREE(m) kfree(m)
+#else
+#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
+#define NODEMASK_FREE(m)
+#endif
+
+/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
+struct nodemask_scratch {
+ nodemask_t mask1;
+ nodemask_t mask2;
+};
+
+#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+
+
#endif /* __LINUX_NODEMASK_H */
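A minimal sketch of how NODEMASK_SCRATCH keeps the temporary masks off the stack when NODES_SHIFT is large; the mask arithmetic itself is only illustrative:

#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int example_masks(const nodemask_t *from, const nodemask_t *to)
{
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;	/* only possible in the kmalloc() variant */

	nodes_and(scratch->mask1, *from, *to);
	nodes_or(scratch->mask2, scratch->mask1, node_states[N_ONLINE]);
	/* ... use scratch->mask1 / scratch->mask2 ... */

	NODEMASK_SCRATCH_FREE(scratch);
	return 0;
}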
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index bd15d7a5f5ce..b53f7006cc4e 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -115,27 +115,44 @@ enum perf_counter_sample_format {
PERF_SAMPLE_TID = 1U << 1,
PERF_SAMPLE_TIME = 1U << 2,
PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_GROUP = 1U << 4,
+ PERF_SAMPLE_READ = 1U << 4,
PERF_SAMPLE_CALLCHAIN = 1U << 5,
PERF_SAMPLE_ID = 1U << 6,
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
+ PERF_SAMPLE_RAW = 1U << 10,
- PERF_SAMPLE_MAX = 1U << 10, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};
/*
- * Bits that can be set in attr.read_format to request that
- * reads on the counter should return the indicated quantities,
- * in increasing order of bit value, after the counter value.
+ * The format of the data returned by read() on a perf counter fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ * { u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
+ * } && !PERF_FORMAT_GROUP
+ *
+ * { u64 nr;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
+ * };
*/
enum perf_counter_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
+ PERF_FORMAT_GROUP = 1U << 3,
- PERF_FORMAT_MAX = 1U << 3, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -181,8 +198,9 @@ struct perf_counter_attr {
freq : 1, /* use freq, not period */
inherit_stat : 1, /* per task counts */
enable_on_exec : 1, /* next exec enables */
+ task : 1, /* trace fork/exit */
- __reserved_1 : 51;
+ __reserved_1 : 50;
__u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2;
@@ -311,6 +329,15 @@ enum perf_event_type {
/*
* struct {
* struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * };
+ */
+ PERF_EVENT_EXIT = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
* u64 time;
* u64 id;
* u64 stream_id;
@@ -323,6 +350,7 @@ enum perf_event_type {
* struct {
* struct perf_event_header header;
* u32 pid, ppid;
+ * u32 tid, ptid;
* };
*/
PERF_EVENT_FORK = 7,
@@ -331,10 +359,8 @@ enum perf_event_type {
* struct {
* struct perf_event_header header;
* u32 pid, tid;
- * u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 parent_id; } && PERF_FORMAT_ID
+ *
+ * struct read_format values;
* };
*/
PERF_EVENT_READ = 8,
@@ -352,11 +378,24 @@ enum perf_event_type {
* { u32 cpu, res; } && PERF_SAMPLE_CPU
* { u64 period; } && PERF_SAMPLE_PERIOD
*
- * { u64 nr;
- * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
+ * { struct read_format values; } && PERF_SAMPLE_READ
*
* { u64 nr,
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
+ *
+ * #
+ * # The RAW record below is opaque data wrt the ABI
+ * #
+ * # That is, the ABI doesn't make any promises wrt to
+ * # the stability of its content, it may vary depending
+ * # on event, hardware, kernel version and phase of
+ * # the moon.
+ * #
+ * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+ * #
+ *
+ * { u32 size;
+ * char data[size];}&& PERF_SAMPLE_RAW
* };
*/
PERF_EVENT_SAMPLE = 9,
@@ -402,6 +441,11 @@ struct perf_callchain_entry {
__u64 ip[PERF_MAX_STACK_DEPTH];
};
+struct perf_raw_record {
+ u32 size;
+ void *data;
+};
+
struct task_struct;
/**
@@ -670,10 +714,13 @@ struct perf_sample_data {
struct pt_regs *regs;
u64 addr;
u64 period;
+ struct perf_raw_record *raw;
};
extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
struct perf_sample_data *data);
+extern void perf_counter_output(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data);
/*
* Return 1 for a software counter, 0 for a hardware counter
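The read_format layout documented above can be mirrored directly in userspace. A sketch for the PERF_FORMAT_GROUP | PERF_FORMAT_ID case with no time fields; the struct name and buffer handling are hypothetical:

#include <stdio.h>
#include <linux/types.h>

/* Matches the read_format comment above for GROUP | ID, no time fields. */
struct group_read {
	__u64 nr;		/* number of counters in the group */
	struct {
		__u64 value;
		__u64 id;	/* present because PERF_FORMAT_ID is set */
	} cntr[];		/* nr entries follow */
};

static void print_group(const void *buf)	/* buf filled by read() on the group leader */
{
	const struct group_read *gr = buf;
	__u64 i;

	for (i = 0; i < gr->nr; i++)
		printf("counter id %llu: value %llu\n",
		       (unsigned long long)gr->cntr[i].id,
		       (unsigned long long)gr->cntr[i].value);
}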
diff --git a/include/linux/time.h b/include/linux/time.h
index ea16c1a01d51..256232f7e5e6 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -101,7 +101,8 @@ extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
-extern unsigned long read_persistent_clock(void);
+extern void read_persistent_clock(struct timespec *ts);
+extern void read_boot_clock(struct timespec *ts);
extern int update_persistent_clock(struct timespec now);
extern int no_sync_cmos_clock __read_mostly;
void timekeeping_init(void);
@@ -109,6 +110,8 @@ extern int timekeeping_suspended;
unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
+struct timespec __current_kernel_time(void); /* does not hold xtime_lock */
+struct timespec get_monotonic_coarse(void);
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
@@ -147,6 +150,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern void update_wall_time(void);
extern void update_xtime_cache(u64 nsec);
+extern void timekeeping_leap_insert(int leapsecond);
struct tms;
extern void do_sys_times(struct tms *);
@@ -241,6 +245,8 @@ struct itimerval {
#define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4
+#define CLOCK_REALTIME_COARSE 5
+#define CLOCK_MONOTONIC_COARSE 6
/*
* The IDs of various hardware clocks:
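read_persistent_clock() now fills a caller-provided timespec instead of returning seconds. A hypothetical arch-side implementation might look like the sketch below; my_rtc_read_seconds() is made up:

#include <linux/time.h>

extern unsigned long my_rtc_read_seconds(void);	/* hypothetical RTC helper */

void read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec  = my_rtc_read_seconds();
	ts->tv_nsec = 0;	/* most RTCs only have second resolution */
}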
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 40f38d896777..0c4ee9b88f85 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -144,7 +144,7 @@ struct tty_ldisc_ops {
struct tty_ldisc {
struct tty_ldisc_ops *ops;
- int refcount;
+ atomic_t users;
};
#define TTY_LDISC_MAGIC 0x5403
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6788e1a4d4ca..cf3c2f5dba51 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,7 +77,14 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
-extern void init_waitqueue_head(wait_queue_head_t *q);
+extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+
+#define init_waitqueue_head(q) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_waitqueue_head((q), &__key); \
+ } while (0)
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
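Because the macro form of init_waitqueue_head() creates a static lock_class_key per call site, lockdep can now distinguish wait queues initialised in different places. A small sketch with a hypothetical device structure:

#include <linux/wait.h>

struct my_dev {				/* hypothetical device */
	wait_queue_head_t rx_wait;
	wait_queue_head_t tx_wait;
};

static void my_dev_init(struct my_dev *dev)
{
	init_waitqueue_head(&dev->rx_wait);	/* gets its own lock class */
	init_waitqueue_head(&dev->tx_wait);	/* distinct class: separate call site */
}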
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 80072611d26a..c274993234e3 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -355,7 +355,17 @@ struct rfcomm_dev_list_req {
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+
+#ifdef CONFIG_BT_RFCOMM_TTY
int rfcomm_init_ttys(void);
void rfcomm_cleanup_ttys(void);
-
+#else
+static inline int rfcomm_init_ttys(void)
+{
+ return 0;
+}
+static inline void rfcomm_cleanup_ttys(void)
+{
+}
+#endif
#endif /* __RFCOMM_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1a21895b732b..d1892d66701a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -979,6 +979,10 @@ struct cfg80211_ops {
* channels at a later time. This can be used for devices which do not
* have calibration information gauranteed for frequencies or settings
* outside of its regulatory domain.
+ * @disable_beacon_hints: enable this if your driver needs to ensure that
+ * passive scan flags and beaconing flags may not be lifted by cfg80211
+ * due to regulatory beacon hints. For more information on beacon
+ * hints read the documenation for regulatory_hint_found_beacon()
* @reg_notifier: the driver's regulatory notification callback
* @regd: the driver's regulatory domain, if one was requested via
* the regulatory_hint() API. This can be used by the driver
@@ -1004,6 +1008,7 @@ struct wiphy {
bool custom_regulatory;
bool strict_regulatory;
+ bool disable_beacon_hints;
enum cfg80211_signal_type signal_type;
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..f64fbaae781a 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
#undef TP_fast_assign
#define TP_fast_assign(args...) args
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * int ret = 0;
+ *
+ * if (!atomic_inc_return(&event_call->profile_count))
+ * ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ * return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * if (atomic_add_negative(-1, &event->call->profile_count))
+ * unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+ \
+static void ftrace_profile_##call(proto); \
+ \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{ \
+ int ret = 0; \
+ \
+ if (!atomic_inc_return(&event_call->profile_count)) \
+ ret = register_trace_##call(ftrace_profile_##call); \
+ \
+ return ret; \
+} \
+ \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{ \
+ if (atomic_add_negative(-1, &event_call->profile_count)) \
+ unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
/*
* Stage 4 of the trace events.
*
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \
#define TP_FMT(fmt, args...) fmt "\n", ##args
#ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args) \
-static void ftrace_profile_##call(proto) \
-{ \
- extern void perf_tpcounter_event(int); \
- perf_tpcounter_event(event_##call.id); \
-} \
- \
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{ \
- int ret = 0; \
- \
- if (!atomic_inc_return(&event_call->profile_count)) \
- ret = register_trace_##call(ftrace_profile_##call); \
- \
- return ret; \
-} \
- \
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{ \
- if (atomic_add_negative(-1, &event_call->profile_count)) \
- unregister_trace_##call(ftrace_profile_##call); \
-}
#define _TRACE_PROFILE_INIT(call) \
.profile_count = ATOMIC_INIT(-1), \
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
.profile_disable = ftrace_profile_disable_##call,
#else
-#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
\
static struct ftrace_event_call event_##call; \
\
@@ -586,6 +615,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ * struct ftrace_event_call *event_call = &event_<call>;
+ * extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ * struct ftrace_raw_##call *entry;
+ * u64 __addr = 0, __count = 1;
+ * unsigned long irq_flags;
+ * int __entry_size;
+ * int __data_size;
+ * int pc;
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ * // Below we want to get the aligned size by taking into account
+ * // the u32 field that will later store the buffer size
+ * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ * sizeof(u64));
+ * __entry_size -= sizeof(u32);
+ *
+ * do {
+ * char raw_data[__entry_size]; <- allocate our sample in the stack
+ * struct trace_entry *ent;
+ *
+ * zero dead bytes from alignment to avoid stack leak to userspace:
+ *
+ * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ * entry = (struct ftrace_raw_<call> *)raw_data;
+ * ent = &entry->ent;
+ * tracing_generic_entry_update(ent, irq_flags, pc);
+ * ent->type = event_call->id;
+ *
+ * <tstruct> <- do some jobs with dynamic arrays
+ *
+ * <assign> <- affect our values
+ *
+ * perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ * __entry_size); <- submit them to perf counter
+ * } while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+static void ftrace_profile_##call(proto) \
+{ \
+ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+ struct ftrace_event_call *event_call = &event_##call; \
+ extern void perf_tpcounter_event(int, u64, u64, void *, int); \
+ struct ftrace_raw_##call *entry; \
+ u64 __addr = 0, __count = 1; \
+ unsigned long irq_flags; \
+ int __entry_size; \
+ int __data_size; \
+ int pc; \
+ \
+ local_save_flags(irq_flags); \
+ pc = preempt_count(); \
+ \
+ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+ sizeof(u64)); \
+ __entry_size -= sizeof(u32); \
+ \
+ do { \
+ char raw_data[__entry_size]; \
+ struct trace_entry *ent; \
+ \
+ *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
+ entry = (struct ftrace_raw_##call *)raw_data; \
+ ent = &entry->ent; \
+ tracing_generic_entry_update(ent, irq_flags, pc); \
+ ent->type = event_call->id; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+ __entry_size); \
+ } while (0); \
+ \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
#undef _TRACE_PROFILE_INIT