From 1b08e907211cdc744f54871736005d9f3e7f182c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 14 Sep 2012 18:52:10 +0200 Subject: uprobes: Kill UTASK_BP_HIT state Kill UTASK_BP_HIT state, it buys nothing but complicates the code. It is only used in uprobe_notify_resume() to decide who should be called, we can check utask->active_uprobe != NULL instead. And this allows us to simplify handle_swbp(), no need to clear utask->state. Likewise we could kill UTASK_SSTEP, but UTASK_BP_HIT is worse and imho should die. The problem is, it creates the special case when task->utask is NULL, we can't distinguish RUNNING and BP_HIT. With this patch utask == NULL always means RUNNING. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- include/linux/uprobes.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index e6f0331e3d45..18d839da6517 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -59,7 +59,6 @@ struct uprobe_consumer { #ifdef CONFIG_UPROBES enum uprobe_task_state { UTASK_RUNNING, - UTASK_BP_HIT, UTASK_SSTEP, UTASK_SSTEP_ACK, UTASK_SSTEP_TRAPPED, -- cgit v1.2.3 From cb9a19fe4aa51afa34786bd383e6614fa0083d58 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 30 Sep 2012 20:11:45 +0200 Subject: uprobes: Introduce prepare_uprobe() Preparation. Extract the copy_insn/arch_uprobe_analyze_insn code from install_breakpoint() into the new helper, prepare_uprobe(). And move uprobe->flags defines from uprobes.h to uprobes.c, nobody else can use them anyway. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- include/linux/uprobes.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 18d839da6517..24594571c5a3 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -35,16 +35,6 @@ struct inode; # include #endif -/* flags that denote/change uprobes behaviour */ - -/* Have a copy of original instruction */ -#define UPROBE_COPY_INSN 0x1 - -/* Dont run handlers when first register/ last unregister in progress*/ -#define UPROBE_RUN_HANDLER 0x2 -/* Can skip singlestep */ -#define UPROBE_SKIP_SSTEP 0x4 - struct uprobe_consumer { int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); /* -- cgit v1.2.3 From fc5a40a2301aa241eedb16caf9169ca5763707c1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 9 Oct 2012 09:49:02 +0100 Subject: UAPI: (Scripted) Disintegrate include/linux/raid Signed-off-by: David Howells Acked-by: Arnd Bergmann Acked-by: Thomas Gleixner Acked-by: Michael Kerrisk Acked-by: Paul E. 
McKenney Acked-by: Dave Jones --- include/linux/raid/Kbuild | 2 - include/linux/raid/md_p.h | 301 ---------------------------------------------- include/linux/raid/md_u.h | 141 +--------------------- 3 files changed, 1 insertion(+), 443 deletions(-) delete mode 100644 include/linux/raid/md_p.h (limited to 'include/linux') diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild index 2415a64c5e51..e69de29bb2d1 100644 --- a/include/linux/raid/Kbuild +++ b/include/linux/raid/Kbuild @@ -1,2 +0,0 @@ -header-y += md_p.h -header-y += md_u.h diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h deleted file mode 100644 index ee753536ab70..000000000000 --- a/include/linux/raid/md_p.h +++ /dev/null @@ -1,301 +0,0 @@ -/* - md_p.h : physical layout of Linux RAID devices - Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - You should have received a copy of the GNU General Public License - (for example /usr/src/linux/COPYING); if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ - -#ifndef _MD_P_H -#define _MD_P_H - -#include - -/* - * RAID superblock. - * - * The RAID superblock maintains some statistics on each RAID configuration. - * Each real device in the RAID set contains it near the end of the device. - * Some of the ideas are copied from the ext2fs implementation. - * - * We currently use 4096 bytes as follows: - * - * word offset function - * - * 0 - 31 Constant generic RAID device information. - * 32 - 63 Generic state information. - * 64 - 127 Personality specific information. - * 128 - 511 12 32-words descriptors of the disks in the raid set. - * 512 - 911 Reserved. - * 912 - 1023 Disk specific descriptor. - */ - -/* - * If x is the real device size in bytes, we return an apparent size of: - * - * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES - * - * and place the 4kB superblock at offset y. 
- */ -#define MD_RESERVED_BYTES (64 * 1024) -#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512) - -#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS) - -#define MD_SB_BYTES 4096 -#define MD_SB_WORDS (MD_SB_BYTES / 4) -#define MD_SB_SECTORS (MD_SB_BYTES / 512) - -/* - * The following are counted in 32-bit words - */ -#define MD_SB_GENERIC_OFFSET 0 -#define MD_SB_PERSONALITY_OFFSET 64 -#define MD_SB_DISKS_OFFSET 128 -#define MD_SB_DESCRIPTOR_OFFSET 992 - -#define MD_SB_GENERIC_CONSTANT_WORDS 32 -#define MD_SB_GENERIC_STATE_WORDS 32 -#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS) -#define MD_SB_PERSONALITY_WORDS 64 -#define MD_SB_DESCRIPTOR_WORDS 32 -#define MD_SB_DISKS 27 -#define MD_SB_DISKS_WORDS (MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS) -#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS) -#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS) - -/* - * Device "operational" state bits - */ -#define MD_DISK_FAULTY 0 /* disk is faulty / operational */ -#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */ -#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */ -#define MD_DISK_REMOVED 3 /* disk is in sync with the raid set */ - -#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" is RAID1 config. - * read requests will only be sent here in - * dire need - */ - -typedef struct mdp_device_descriptor_s { - __u32 number; /* 0 Device number in the entire set */ - __u32 major; /* 1 Device major number */ - __u32 minor; /* 2 Device minor number */ - __u32 raid_disk; /* 3 The role of the device in the raid set */ - __u32 state; /* 4 Operational state */ - __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5]; -} mdp_disk_t; - -#define MD_SB_MAGIC 0xa92b4efc - -/* - * Superblock state bits - */ -#define MD_SB_CLEAN 0 -#define MD_SB_ERRORS 1 - -#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */ - -/* - * Notes: - * - if an array is being reshaped (restriped) in order to change the - * the number of active devices in the array, 'raid_disks' will be - * the larger of the old and new numbers. 'delta_disks' will - * be the "new - old". So if +ve, raid_disks is the new value, and - * "raid_disks-delta_disks" is the old. If -ve, raid_disks is the - * old value and "raid_disks+delta_disks" is the new (smaller) value. - */ - - -typedef struct mdp_superblock_s { - /* - * Constant generic information - */ - __u32 md_magic; /* 0 MD identifier */ - __u32 major_version; /* 1 major version to which the set conforms */ - __u32 minor_version; /* 2 minor version ... */ - __u32 patch_version; /* 3 patchlevel version ... 
*/ - __u32 gvalid_words; /* 4 Number of used words in this section */ - __u32 set_uuid0; /* 5 Raid set identifier */ - __u32 ctime; /* 6 Creation time */ - __u32 level; /* 7 Raid personality */ - __u32 size; /* 8 Apparent size of each individual disk */ - __u32 nr_disks; /* 9 total disks in the raid set */ - __u32 raid_disks; /* 10 disks in a fully functional raid set */ - __u32 md_minor; /* 11 preferred MD minor device number */ - __u32 not_persistent; /* 12 does it have a persistent superblock */ - __u32 set_uuid1; /* 13 Raid set identifier #2 */ - __u32 set_uuid2; /* 14 Raid set identifier #3 */ - __u32 set_uuid3; /* 15 Raid set identifier #4 */ - __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16]; - - /* - * Generic state information - */ - __u32 utime; /* 0 Superblock update time */ - __u32 state; /* 1 State bits (clean, ...) */ - __u32 active_disks; /* 2 Number of currently active disks */ - __u32 working_disks; /* 3 Number of working disks */ - __u32 failed_disks; /* 4 Number of failed disks */ - __u32 spare_disks; /* 5 Number of spare disks */ - __u32 sb_csum; /* 6 checksum of the whole superblock */ -#ifdef __BIG_ENDIAN - __u32 events_hi; /* 7 high-order of superblock update count */ - __u32 events_lo; /* 8 low-order of superblock update count */ - __u32 cp_events_hi; /* 9 high-order of checkpoint update count */ - __u32 cp_events_lo; /* 10 low-order of checkpoint update count */ -#else - __u32 events_lo; /* 7 low-order of superblock update count */ - __u32 events_hi; /* 8 high-order of superblock update count */ - __u32 cp_events_lo; /* 9 low-order of checkpoint update count */ - __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ -#endif - __u32 recovery_cp; /* 11 recovery checkpoint sector count */ - /* There are only valid for minor_version > 90 */ - __u64 reshape_position; /* 12,13 next address in array-space for reshape */ - __u32 new_level; /* 14 new level we are reshaping to */ - __u32 delta_disks; /* 15 change in number of raid_disks */ - __u32 new_layout; /* 16 new layout */ - __u32 new_chunk; /* 17 new chunk size (bytes) */ - __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18]; - - /* - * Personality information - */ - __u32 layout; /* 0 the array's physical layout */ - __u32 chunk_size; /* 1 chunk size in bytes */ - __u32 root_pv; /* 2 LV root PV */ - __u32 root_block; /* 3 LV root block */ - __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4]; - - /* - * Disks information - */ - mdp_disk_t disks[MD_SB_DISKS]; - - /* - * Reserved - */ - __u32 reserved[MD_SB_RESERVED_WORDS]; - - /* - * Active descriptor - */ - mdp_disk_t this_disk; - -} mdp_super_t; - -static inline __u64 md_event(mdp_super_t *sb) { - __u64 ev = sb->events_hi; - return (ev<<32)| sb->events_lo; -} - -#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1) - -/* - * The version-1 superblock : - * All numeric fields are little-endian. - * - * total size: 256 bytes plus 2 per device. - * 1K allows 384 devices. - */ -struct mdp_superblock_1 { - /* constant array information - 128 bytes */ - __le32 magic; /* MD_SB_MAGIC: 0xa92b4efc - little endian */ - __le32 major_version; /* 1 */ - __le32 feature_map; /* bit 0 set if 'bitmap_offset' is meaningful */ - __le32 pad0; /* always set to 0 when writing */ - - __u8 set_uuid[16]; /* user-space generated. 
*/ - char set_name[32]; /* set and interpreted by user-space */ - - __le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/ - __le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */ - __le32 layout; /* only for raid5 and raid10 currently */ - __le64 size; /* used size of component devices, in 512byte sectors */ - - __le32 chunksize; /* in 512byte sectors */ - __le32 raid_disks; - __le32 bitmap_offset; /* sectors after start of superblock that bitmap starts - * NOTE: signed, so bitmap can be before superblock - * only meaningful of feature_map[0] is set. - */ - - /* These are only valid with feature bit '4' */ - __le32 new_level; /* new level we are reshaping to */ - __le64 reshape_position; /* next address in array-space for reshape */ - __le32 delta_disks; /* change in number of raid_disks */ - __le32 new_layout; /* new layout */ - __le32 new_chunk; /* new chunk size (512byte sectors) */ - __le32 new_offset; /* signed number to add to data_offset in new - * layout. 0 == no-change. This can be - * different on each device in the array. - */ - - /* constant this-device information - 64 bytes */ - __le64 data_offset; /* sector start of data, often 0 */ - __le64 data_size; /* sectors in this device that can be used for data */ - __le64 super_offset; /* sector start of this superblock */ - __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */ - __le32 dev_number; /* permanent identifier of this device - not role in raid */ - __le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */ - __u8 device_uuid[16]; /* user-space setable, ignored by kernel */ - __u8 devflags; /* per-device flags. Only one defined...*/ -#define WriteMostly1 1 /* mask for writemostly flag in above */ - /* Bad block log. If there are any bad blocks the feature flag is set. - * If offset and size are non-zero, that space is reserved and available - */ - __u8 bblog_shift; /* shift from sectors to block size */ - __le16 bblog_size; /* number of sectors reserved for list */ - __le32 bblog_offset; /* sector offset from superblock to bblog, - * signed - not unsigned */ - - /* array state information - 64 bytes */ - __le64 utime; /* 40 bits second, 24 bits microseconds */ - __le64 events; /* incremented when superblock updated */ - __le64 resync_offset; /* data before this offset (from data_offset) known to be in sync */ - __le32 sb_csum; /* checksum up to devs[max_dev] */ - __le32 max_dev; /* size of devs[] array to consider */ - __u8 pad3[64-32]; /* set to 0 when writing */ - - /* device state information. Indexed by dev_number. - * 2 bytes per device - * Note there are no per-device state flags. State information is rolled - * into the 'roles' value. If a device is spare or faulty, then it doesn't - * have a meaningful role. - */ - __le16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */ -}; - -/* feature_map bits */ -#define MD_FEATURE_BITMAP_OFFSET 1 -#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and - * must be honoured - */ -#define MD_FEATURE_RESHAPE_ACTIVE 4 -#define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */ -#define MD_FEATURE_REPLACEMENT 16 /* This device is replacing an - * active device with same 'role'. - * 'recovery_offset' is also set. - */ -#define MD_FEATURE_RESHAPE_BACKWARDS 32 /* Reshape doesn't change number - * of devices, but is going - * backwards anyway. 
- */ -#define MD_FEATURE_NEW_OFFSET 64 /* new_offset must be honoured */ -#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ - |MD_FEATURE_RECOVERY_OFFSET \ - |MD_FEATURE_RESHAPE_ACTIVE \ - |MD_FEATURE_BAD_BLOCKS \ - |MD_FEATURE_REPLACEMENT \ - |MD_FEATURE_RESHAPE_BACKWARDS \ - |MD_FEATURE_NEW_OFFSET \ - ) - -#endif diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h index fb1abb3367e9..358c04bfbe2a 100644 --- a/include/linux/raid/md_u.h +++ b/include/linux/raid/md_u.h @@ -11,149 +11,10 @@ (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ - #ifndef _MD_U_H #define _MD_U_H -/* - * Different major versions are not compatible. - * Different minor versions are only downward compatible. - * Different patchlevel versions are downward and upward compatible. - */ -#define MD_MAJOR_VERSION 0 -#define MD_MINOR_VERSION 90 -/* - * MD_PATCHLEVEL_VERSION indicates kernel functionality. - * >=1 means different superblock formats are selectable using SET_ARRAY_INFO - * and major_version/minor_version accordingly - * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT - * in the super status byte - * >=3 means that bitmap superblock version 4 is supported, which uses - * little-ending representation rather than host-endian - */ -#define MD_PATCHLEVEL_VERSION 3 - -/* ioctls */ - -/* status */ -#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t) -#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t) -#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t) -#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13) -#define RAID_AUTORUN _IO (MD_MAJOR, 0x14) -#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t) - -/* configuration */ -#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20) -#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t) -#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22) -#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t) -#define SET_DISK_INFO _IO (MD_MAJOR, 0x24) -#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25) -#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26) -#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27) -#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28) -#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29) -#define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a) -#define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int) +#include -/* usage */ -#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t) -/* 0x31 was START_ARRAY */ -#define STOP_ARRAY _IO (MD_MAJOR, 0x32) -#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) -#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) - -/* 63 partitions with the alternate major number (mdp) */ -#define MdpMinorShift 6 -#ifdef __KERNEL__ extern int mdp_major; -#endif - -typedef struct mdu_version_s { - int major; - int minor; - int patchlevel; -} mdu_version_t; - -typedef struct mdu_array_info_s { - /* - * Generic constant information - */ - int major_version; - int minor_version; - int patch_version; - int ctime; - int level; - int size; - int nr_disks; - int raid_disks; - int md_minor; - int not_persistent; - - /* - * Generic state information - */ - int utime; /* 0 Superblock update time */ - int state; /* 1 State bits (clean, ...) 
*/ - int active_disks; /* 2 Number of currently active disks */ - int working_disks; /* 3 Number of working disks */ - int failed_disks; /* 4 Number of failed disks */ - int spare_disks; /* 5 Number of spare disks */ - - /* - * Personality information - */ - int layout; /* 0 the array's physical layout */ - int chunk_size; /* 1 chunk size in bytes */ - -} mdu_array_info_t; - -/* non-obvious values for 'level' */ -#define LEVEL_MULTIPATH (-4) -#define LEVEL_LINEAR (-1) -#define LEVEL_FAULTY (-5) - -/* we need a value for 'no level specified' and 0 - * means 'raid0', so we need something else. This is - * for internal use only - */ -#define LEVEL_NONE (-1000000) - -typedef struct mdu_disk_info_s { - /* - * configuration/status of one particular disk - */ - int number; - int major; - int minor; - int raid_disk; - int state; - -} mdu_disk_info_t; - -typedef struct mdu_start_info_s { - /* - * configuration/status of one particular disk - */ - int major; - int minor; - int raid_disk; - int state; - -} mdu_start_info_t; - -typedef struct mdu_bitmap_file_s -{ - char pathname[4096]; -} mdu_bitmap_file_t; - -typedef struct mdu_param_s -{ - int personality; /* 1,2,3,4 */ - int chunk_size; /* in bytes */ - int max_fault; /* unused for now */ -} mdu_param_t; - #endif - -- cgit v1.2.3 From 2f5f1ce90a5fa097372bb895452a718e4d33e4e3 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Fri, 17 Aug 2012 14:47:30 +0300 Subject: spi: tsc2005: delete soon-obsolete e-mail address Delete soon-obsolete e-mail address. Signed-off-by: Aaro Koskinen Signed-off-by: Mark Brown --- include/linux/spi/tsc2005.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h index d9b0c84220c7..8f721e465e05 100644 --- a/include/linux/spi/tsc2005.h +++ b/include/linux/spi/tsc2005.h @@ -3,8 +3,6 @@ * * Copyright (C) 2009-2010 Nokia Corporation * - * Contact: Aaro Koskinen - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or -- cgit v1.2.3 From 9dbf8ccde1b810a59b684e1d1aec7f9d2d007162 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 15 Oct 2012 10:35:00 +0100 Subject: iio: Add some helper macros for unit conversion Some datasheets use a different unit to specify the channel scale than what IIO expects it to be. This patch adds two helper macros which allow to convert units commonly used in datasheets to IIO units: * acceleration: g -> meter / second**2 * angular velocity: degree (/ second) -> rad (/ second) This makes it much more convenient to specify and also easier to verify a channel's scale attribute. 
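For illustration only (not part of this patch; the driver name, channel callback and raw scale value below are invented), a gyroscope driver could report its scale through the new helper roughly like this:

	#include <linux/errno.h>
	#include <linux/iio/iio.h>

	/* Hypothetical part whose datasheet gives 0.07 degree/s per LSB. */
	static int foo_gyro_read_raw(struct iio_dev *indio_dev,
				     struct iio_chan_spec const *chan,
				     int *val, int *val2, long mask)
	{
		switch (mask) {
		case IIO_CHAN_INFO_SCALE:
			*val = 0;
			/* conversion is linear: micro-degrees in, micro-rad out */
			*val2 = IIO_DEGREE_TO_RAD(70000);
			return IIO_VAL_INT_PLUS_MICRO;
		default:
			return -EINVAL;
		}
	}
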
Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index c0ae76ac4e0b..7806c24e5bc8 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -618,4 +618,20 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) }; #endif +/** + * IIO_DEGREE_TO_RAD() - Convert degree to rad + * @deg: A value in degree + * + * Returns the given value converted from degree to rad + */ +#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL) + +/** + * IIO_G_TO_M_S_2() - Convert g to meter / second**2 + * @g: A value in g + * + * Returns the given value converted from g to meter / second**2 + */ +#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL) + #endif /* _INDUSTRIAL_IO_H_ */ -- cgit v1.2.3 From 0cf6ad8a18f7f7bdbb81975188d9e0656ef277dd Mon Sep 17 00:00:00 2001 From: anish kumar Date: Thu, 30 Aug 2012 00:35:09 +0530 Subject: extcon: standard cable names definition and declaration changed With this change now individual drivers can use standard cable names as below: static const char *arizona_cable[] = { extcon_cable_name[EXTCON_USB], extcon_cable_name[EXTCON_USB_HOST], "CUSTOM_CABLE" NULL, } Signed-off-by: anish kumar Signed-off-by: MyungJoo Ham --- include/linux/extcon.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 7443a560c9d0..2c26c14cd710 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -68,7 +68,7 @@ enum extcon_cable_name { EXTCON_VIDEO_OUT, EXTCON_MECHANICAL, }; -extern const char *extcon_cable_name[]; +extern const char extcon_cable_name[][CABLE_NAME_MAX + 1]; struct extcon_cable; -- cgit v1.2.3 From 6f73601efb35c7003f5c58c2bc6fd08f3652169c Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Fri, 19 Oct 2012 15:14:44 +0000 Subject: tcp: add SYN/data info to TCP_INFO Add a bit TCPI_OPT_SYN_DATA (32) to the socket option TCP_INFO:tcpi_options. It's set if the data in SYN (sent or received) is acked by SYN-ACK. Server or client application can use this information to check Fast Open success rate. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 8a7fc4be2d75..60b7aac15e0e 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -191,7 +191,8 @@ struct tcp_sock { u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ early_retrans_delayed:1, /* Delayed ER timer installed */ syn_data:1, /* SYN includes data */ - syn_fastopen:1; /* SYN includes Fast Open option */ + syn_fastopen:1, /* SYN includes Fast Open option */ + syn_data_acked:1;/* data in SYN is acked by SYN-ACK */ /* RTT measurement */ u32 srtt; /* smoothed round trip time << 3 */ -- cgit v1.2.3 From f8457d574f680a98c77846a15df13086ab1ab426 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Mon, 8 Oct 2012 14:41:49 +0900 Subject: extcon: MAX77693: Add platform data for MUIC device to initialize registers This patch add platform data for MUIC device to initialize register on probe() call because it should unmask interrupt mask register and initialize some register related to MUIC device. 
Signed-off-by: Chanwoo Choi Signed-off-by: Myungjoo Ham Signed-off-by: Kyungmin Park --- include/linux/mfd/max77693.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h index 1d28ae90384e..fe03b2d35d4f 100644 --- a/include/linux/mfd/max77693.h +++ b/include/linux/mfd/max77693.h @@ -30,7 +30,20 @@ #ifndef __LINUX_MFD_MAX77693_H #define __LINUX_MFD_MAX77693_H +struct max77693_reg_data { + u8 addr; + u8 data; +}; + +struct max77693_muic_platform_data { + struct max77693_reg_data *init_data; + int num_init_data; +}; + struct max77693_platform_data { int wakeup; + + /* muic data */ + struct max77693_muic_platform_data *muic_data; }; #endif /* __LINUX_MFD_MAX77693_H */ -- cgit v1.2.3 From 6760bca9fd16256210f4922a3e9f067d2c7017d7 Mon Sep 17 00:00:00 2001 From: "Srivatsa S. Bhat" Date: Tue, 16 Oct 2012 13:28:10 +0530 Subject: perf, cpu hotplug: Run CPU_STARTING notifiers with irqs disabled The CPU_STARTING notifiers are supposed to be run with irqs disabled. But the perf_cpu_notifier() macro invokes them without doing that. Fix it. Signed-off-by: Srivatsa S. Bhat Reviewed-by: Paul E. McKenney Cc: peterz@infradead.org Cc: acme@ghostprotocols.net Link: http://lkml.kernel.org/r/20121016075809.3572.47848.stgit@srivatsabhat.in.ibm.com Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e902359aee5..06478056da03 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -803,10 +803,13 @@ static inline void perf_event_task_tick(void) { } do { \ static struct notifier_block fn##_nb __cpuinitdata = \ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ + unsigned long flags; \ fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ (void *)(unsigned long)smp_processor_id()); \ + local_irq_save(flags); \ fn(&fn##_nb, (unsigned long)CPU_STARTING, \ (void *)(unsigned long)smp_processor_id()); \ + local_irq_restore(flags); \ fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ (void *)(unsigned long)smp_processor_id()); \ register_cpu_notifier(&fn##_nb); \ -- cgit v1.2.3 From c13d38e4a1fd5dd07135403c613c8091af444169 Mon Sep 17 00:00:00 2001 From: "Srivatsa S. Bhat" Date: Tue, 16 Oct 2012 13:28:17 +0530 Subject: perf, cpu hotplug: Use cached value of smp_processor_id() The perf_cpu_notifier() macro invokes smp_processor_id() multiple times. Optimize it by using a local variable. Signed-off-by: Srivatsa S. Bhat Reviewed-by: Paul E. 
McKenney Cc: peterz@infradead.org Cc: acme@ghostprotocols.net Link: http://lkml.kernel.org/r/20121016075817.3572.76733.stgit@srivatsabhat.in.ibm.com Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 06478056da03..6bfb2faa0b19 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -803,15 +803,16 @@ static inline void perf_event_task_tick(void) { } do { \ static struct notifier_block fn##_nb __cpuinitdata = \ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ + unsigned long cpu = smp_processor_id(); \ unsigned long flags; \ fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ - (void *)(unsigned long)smp_processor_id()); \ + (void *)(unsigned long)cpu); \ local_irq_save(flags); \ fn(&fn##_nb, (unsigned long)CPU_STARTING, \ - (void *)(unsigned long)smp_processor_id()); \ + (void *)(unsigned long)cpu); \ local_irq_restore(flags); \ fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ - (void *)(unsigned long)smp_processor_id()); \ + (void *)(unsigned long)cpu); \ register_cpu_notifier(&fn##_nb); \ } while (0) -- cgit v1.2.3 From 6ede1fd3cb404c0016de6ac529df46d561bd558b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 22 Oct 2012 16:35:18 -0700 Subject: x86, mm: Trim memory in memblock to be page aligned We will not map partial pages, so need to make sure memblock allocation will not allocate those bytes out. Also we will use for_each_mem_pfn_range() to loop to map memory range to keep them consistent. Signed-off-by: Yinghai Lu Link: http://lkml.kernel.org/r/CAE9FiQVZirvaBMFYRfXMmWEcHbKSicQEHz4VAwUv0xFCk51ZNw@mail.gmail.com Acked-by: Jacob Shin Signed-off-by: H. Peter Anvin Cc: --- include/linux/memblock.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 569d67d4243e..d452ee191066 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -57,6 +57,7 @@ int memblock_add(phys_addr_t base, phys_addr_t size); int memblock_remove(phys_addr_t base, phys_addr_t size); int memblock_free(phys_addr_t base, phys_addr_t size); int memblock_reserve(phys_addr_t base, phys_addr_t size); +void memblock_trim_memory(phys_addr_t align); #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, -- cgit v1.2.3 From c0d2af637863940b1a4fb208224ca7acb905c39f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 18 Oct 2012 12:07:03 -0700 Subject: dynamic_debug: Remove unnecessary __used The __used attribute prevents gcc from eliminating unnecessary, otherwise optimized away, metadata for debugging logging messages. Remove the __used attribute. 
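For context only (illustrative, not part of this change; the driver function and module name are invented), the metadata emitted by this macro is what lets an ordinary pr_debug() call site be toggled at run time:

	#include <linux/device.h>
	#include <linux/printk.h>

	/* In a driver built with CONFIG_DYNAMIC_DEBUG: */
	static void foo_announce(struct device *dev)
	{
		pr_debug("probing %s\n", dev_name(dev));
	}

	/*
	 * The call site is off by default and is enabled from user space via the
	 * dynamic debug control file, e.g.:
	 *   echo 'module mydriver +p' > /sys/kernel/debug/dynamic_debug/control
	 */
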
Signed-off-by: Joe Perches Acked-by: Jason Baron Signed-off-by: Greg Kroah-Hartman --- include/linux/dynamic_debug.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index c18257b0fa72..6dd4787a798a 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -61,7 +61,7 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor, const char *fmt, ...); #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ - static struct _ddebug __used __aligned(8) \ + static struct _ddebug __aligned(8) \ __attribute__((section("__verbose"))) name = { \ .modname = KBUILD_MODNAME, \ .function = __func__, \ -- cgit v1.2.3 From 29fc7c5a4f516d388fb6e1f6d24bfb04b8093e54 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 25 Oct 2012 13:37:53 -0700 Subject: rbtree: include linux/compiler.h for definition of __always_inline rb_erase_augmented() is a static function annotated with __always_inline. This causes a compile failure when attempting to use the rbtree implementation as a library (e.g. kvm tool): rbtree_augmented.h:125:24: error: expected `=', `,', `;', `asm' or `__attribute__' before `void' Include linux/compiler.h in rbtree_augmented.h so that the __always_inline macro is resolved correctly. Signed-off-by: Will Deacon Cc: Pekka Enberg Reviewed-by: Michel Lespinasse Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rbtree_augmented.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 214caa33433b..2ac60c9cf644 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -24,6 +24,7 @@ #ifndef _LINUX_RBTREE_AUGMENTED_H #define _LINUX_RBTREE_AUGMENTED_H +#include #include /* -- cgit v1.2.3 From 5c1eabe68501d1e1b1586c7f4c46cc531828c4ab Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 22 Oct 2012 19:37:47 -0400 Subject: percpu-rw-semaphores: use light/heavy barriers This patch introduces new barrier pair light_mb() and heavy_mb() for percpu rw semaphores. This patch fixes a bug in percpu-rw-semaphores where a barrier was missing in percpu_up_write. This patch improves performance on the read path of percpu-rw-semaphores: on non-x86 cpus, there was a smp_mb() in percpu_up_read. This patch changes it to a compiler barrier and removes the "#if defined(X86) ..." condition. From: Lai Jiangshan Signed-off-by: Mikulas Patocka Signed-off-by: Linus Torvalds --- include/linux/percpu-rwsem.h | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index cf80f7e5277f..18f35b54286c 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -12,6 +12,9 @@ struct percpu_rw_semaphore { struct mutex mtx; }; +#define light_mb() barrier() +#define heavy_mb() synchronize_sched() + static inline void percpu_down_read(struct percpu_rw_semaphore *p) { rcu_read_lock(); @@ -24,22 +27,12 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *p) } this_cpu_inc(*p->counters); rcu_read_unlock(); + light_mb(); /* A, between read of p->locked and read of data, paired with D */ } static inline void percpu_up_read(struct percpu_rw_semaphore *p) { - /* - * On X86, write operation in this_cpu_dec serves as a memory unlock - * barrier (i.e. 
memory accesses may be moved before the write, but - * no memory accesses are moved past the write). - * On other architectures this may not be the case, so we need smp_mb() - * there. - */ -#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE)) - barrier(); -#else - smp_mb(); -#endif + light_mb(); /* B, between read of the data and write to p->counter, paired with C */ this_cpu_dec(*p->counters); } @@ -61,11 +54,12 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p) synchronize_rcu(); while (__percpu_count(p->counters)) msleep(1); - smp_rmb(); /* paired with smp_mb() in percpu_sem_up_read() */ + heavy_mb(); /* C, between read of p->counter and write to data, paired with B */ } static inline void percpu_up_write(struct percpu_rw_semaphore *p) { + heavy_mb(); /* D, between write to data and write to p->locked, paired with A */ p->locked = false; mutex_unlock(&p->mtx); } -- cgit v1.2.3 From 1bf11c53535ab87e3bf14ecdf6747bf46f601c5d Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 22 Oct 2012 19:39:16 -0400 Subject: percpu-rw-semaphores: use rcu_read_lock_sched Use rcu_read_lock_sched / rcu_read_unlock_sched / synchronize_sched instead of rcu_read_lock / rcu_read_unlock / synchronize_rcu. This is an optimization. The RCU-protected region is very small, so there will be no latency problems if we disable preempt in this region. So we use rcu_read_lock_sched / rcu_read_unlock_sched that translates to preempt_disable / preempt_disable. It is smaller (and supposedly faster) than preemptible rcu_read_lock / rcu_read_unlock. Signed-off-by: Mikulas Patocka Signed-off-by: Linus Torvalds --- include/linux/percpu-rwsem.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 18f35b54286c..250a4acddb2b 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -17,16 +17,16 @@ struct percpu_rw_semaphore { static inline void percpu_down_read(struct percpu_rw_semaphore *p) { - rcu_read_lock(); + rcu_read_lock_sched(); if (unlikely(p->locked)) { - rcu_read_unlock(); + rcu_read_unlock_sched(); mutex_lock(&p->mtx); this_cpu_inc(*p->counters); mutex_unlock(&p->mtx); return; } this_cpu_inc(*p->counters); - rcu_read_unlock(); + rcu_read_unlock_sched(); light_mb(); /* A, between read of p->locked and read of data, paired with D */ } @@ -51,7 +51,7 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p) { mutex_lock(&p->mtx); p->locked = true; - synchronize_rcu(); + synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */ while (__percpu_count(p->counters)) msleep(1); heavy_mb(); /* C, between read of p->counter and write to data, paired with B */ -- cgit v1.2.3 From 87da7e66a40532b743cd50972fcf85a1f15b14ea Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Oct 2012 14:07:59 +0800 Subject: KVM: x86: fix vcpu->mmio_fragments overflow After commit b3356bf0dbb349 (KVM: emulator: optimize "rep ins" handling), the pieces of io data can be collected and write them to the guest memory or MMIO together Unfortunately, kvm splits the mmio access into 8 bytes and store them to vcpu->mmio_fragments. If the guest uses "rep ins" to move large data, it will cause vcpu->mmio_fragments overflow The bug can be exposed by isapc (-M isapc): [23154.818733] general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC [ ......] 
[23154.858083] Call Trace: [23154.859874] [] kvm_get_cr8+0x1d/0x28 [kvm] [23154.861677] [] kvm_arch_vcpu_ioctl_run+0xcda/0xe45 [kvm] [23154.863604] [] ? kvm_arch_vcpu_load+0x17b/0x180 [kvm] Actually, we can use one mmio_fragment to store a large mmio access then split it when we pass the mmio-exit-info to userspace. After that, we only need two entries to store mmio info for the cross-mmio pages access Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti --- include/linux/kvm_host.h | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 93bfc9f9815c..ecc554374e44 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -42,19 +42,8 @@ */ #define KVM_MEMSLOT_INVALID (1UL << 16) -/* - * If we support unaligned MMIO, at most one fragment will be split into two: - */ -#ifdef KVM_UNALIGNED_MMIO -# define KVM_EXTRA_MMIO_FRAGMENTS 1 -#else -# define KVM_EXTRA_MMIO_FRAGMENTS 0 -#endif - -#define KVM_USER_MMIO_SIZE 8 - -#define KVM_MAX_MMIO_FRAGMENTS \ - (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS) +/* Two fragments for cross MMIO pages. */ +#define KVM_MAX_MMIO_FRAGMENTS 2 /* * For the normal pfn, the highest 12 bits should be zero, -- cgit v1.2.3 From d9b482c8ba1973a189f2d4c8175d405b87fbf2d7 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Tue, 30 Oct 2012 14:45:57 -0400 Subject: hashtable: introduce a small and naive hashtable This hashtable implementation is using hlist buckets to provide a simple hashtable to prevent it from getting reimplemented all over the kernel. Signed-off-by: Sasha Levin [ Merging this now, so that subsystems can start applying Sasha's patches that use this - Linus ] Signed-off-by: Linus Torvalds --- include/linux/hashtable.h | 192 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 include/linux/hashtable.h (limited to 'include/linux') diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h new file mode 100644 index 000000000000..227c62424f3c --- /dev/null +++ b/include/linux/hashtable.h @@ -0,0 +1,192 @@ +/* + * Statically sized hash table implementation + * (C) 2012 Sasha Levin + */ + +#ifndef _LINUX_HASHTABLE_H +#define _LINUX_HASHTABLE_H + +#include +#include +#include +#include +#include + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +/** + * hash_init - initialize a hash table + * @hashtable: hashtable to be initialized + * + * Calculates the size of the hashtable from the given parameter, otherwise + * same as hash_init_size. + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. 
+ */ +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_add - add an object to a hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_add_rcu - add an object to a rcu enabled hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add_rcu(hashtable, node, key) \ + hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_hashed - check whether an object is in any hashtable + * @node: the &struct hlist_node of the object to be checked + */ +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +/** + * hash_empty - check whether a hashtable is empty + * @hashtable: hashtable to check + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. + */ +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_del - remove an object from a hashtable + * @node: &struct hlist_node of the object to remove + */ +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} + +/** + * hash_del_rcu - remove an object from a rcu enabled hashtable + * @node: &struct hlist_node of the object to remove + */ +static inline void hash_del_rcu(struct hlist_node *node) +{ + hlist_del_init_rcu(node); +} + +/** + * hash_for_each - iterate over a hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @node: the &struct list_head to use as a loop cursor for each entry + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each(name, bkt, node, obj, member) \ + for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ + hlist_for_each_entry(obj, node, &name[bkt], member) + +/** + * hash_for_each_rcu - iterate over a rcu enabled hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @node: the &struct list_head to use as a loop cursor for each entry + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_rcu(name, bkt, node, obj, member) \ + for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ + hlist_for_each_entry_rcu(obj, node, &name[bkt], member) + +/** + * hash_for_each_safe - iterate over a hashtable safe against removal of + * hash entry + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @node: the &struct list_head to use as a loop cursor for each entry + * @tmp: a &struct used for temporary storage + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \ + for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ + hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], 
member) + +/** + * hash_for_each_possible - iterate over all possible objects hashing to the + * same bucket + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @node: the &struct list_head to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible(name, obj, node, member, key) \ + hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member) + +/** + * hash_for_each_possible_rcu - iterate over all possible objects hashing to the + * same bucket in an rcu enabled hashtable + * in a rcu enabled hashtable + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @node: the &struct list_head to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_rcu(name, obj, node, member, key) \ + hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member) + +/** + * hash_for_each_possible_safe - iterate over all possible objects hashing to the + * same bucket safe against removals + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @node: the &struct list_head to use as a loop cursor for each entry + * @tmp: a &struct used for temporary storage + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \ + hlist_for_each_entry_safe(obj, node, tmp, \ + &name[hash_min(key, HASH_BITS(name))], member) + + +#endif -- cgit v1.2.3
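
As a usage illustration of the new interface (the struct, key value and message below are hypothetical, not part of the patch), a fixed-size table could be exercised roughly as follows:

	#include <linux/hashtable.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	struct object {
		int id;
		struct hlist_node node;
	};

	/* 2^4 = 16 buckets, statically initialized. */
	static DEFINE_HASHTABLE(objects, 4);

	static void example(void)
	{
		struct object *obj, *cur;
		struct hlist_node *pos;

		obj = kmalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return;
		obj->id = 42;
		hash_add(objects, &obj->node, obj->id);

		/* Walk every entry that hashed to the same bucket as key 42. */
		hash_for_each_possible(objects, cur, pos, node, 42)
			if (cur->id == 42)
				pr_info("found object %d\n", cur->id);

		hash_del(&obj->node);
		kfree(obj);
	}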