-rw-r--r--  arch/ia64/kernel/ivt.S  84
-rw-r--r--  arch/ia64/kernel/minstate.h  46
-rw-r--r--  arch/ia64/kernel/patch.c  23
-rw-r--r--  arch/ia64/kernel/setup.c  11
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S  7
-rw-r--r--  arch/s390/Kconfig  3
-rw-r--r--  arch/s390/appldata/appldata_base.c  8
-rw-r--r--  arch/s390/defconfig  11
-rw-r--r--  arch/s390/kernel/dis.c  2
-rw-r--r--  arch/s390/kernel/smp.c  2
-rw-r--r--  arch/s390/mm/init.c  49
-rw-r--r--  arch/s390/mm/vmem.c  18
-rw-r--r--  drivers/base/core.c  15
-rw-r--r--  drivers/cpufreq/cpufreq.c  4
-rw-r--r--  drivers/input/keyboard/atkbd.c  2
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c  38
-rw-r--r--  drivers/input/misc/apanel.c  1
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h  7
-rw-r--r--  drivers/input/serio/i8042.c  33
-rw-r--r--  drivers/input/tablet/gtco.c  17
-rw-r--r--  drivers/input/touchscreen/wm9713.c  22
-rw-r--r--  drivers/input/touchscreen/wm97xx-core.c  25
-rw-r--r--  drivers/misc/thinkpad_acpi.c  10
-rw-r--r--  drivers/net/wireless/b43/b43.h  1
-rw-r--r--  drivers/net/wireless/b43/main.c  70
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c  3
-rw-r--r--  drivers/net/wireless/rndis_wlan.c  65
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h  5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00config.c  1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c  8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c  1
-rw-r--r--  drivers/net/wireless/rtl8180_grf5101.c  2
-rw-r--r--  drivers/net/wireless/rtl8180_max2820.c  5
-rw-r--r--  drivers/net/wireless/rtl8180_sa2400.c  2
-rw-r--r--  drivers/s390/block/dasd.c  28
-rw-r--r--  drivers/s390/char/raw3270.c  9
-rw-r--r--  drivers/s390/char/sclp_config.c  2
-rw-r--r--  drivers/s390/char/sclp_vt220.c  27
-rw-r--r--  drivers/s390/char/tape.h  3
-rw-r--r--  drivers/s390/char/tape_block.c  4
-rw-r--r--  drivers/s390/char/tape_core.c  16
-rw-r--r--  drivers/usb/core/generic.c  5
-rw-r--r--  drivers/usb/core/hcd.h  2
-rw-r--r--  drivers/usb/core/hub.c  15
-rw-r--r--  drivers/usb/core/quirks.c  4
-rw-r--r--  drivers/usb/core/sysfs.c  44
-rw-r--r--  drivers/usb/gadget/fsl_usb2_udc.c  2
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c  1
-rw-r--r--  drivers/usb/host/ehci-fsl.c  7
-rw-r--r--  drivers/usb/host/ehci-hub.c  16
-rw-r--r--  drivers/usb/host/ehci-ixp4xx.c  4
-rw-r--r--  drivers/usb/host/ehci-orion.c  8
-rw-r--r--  drivers/usb/host/ehci-pci.c  4
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c  2
-rw-r--r--  drivers/usb/host/ehci-ppc-soc.c  1
-rw-r--r--  drivers/usb/host/ehci-ps3.c  1
-rw-r--r--  drivers/usb/host/ehci-sched.c  67
-rw-r--r--  drivers/usb/host/ehci.h  5
-rw-r--r--  drivers/usb/misc/Kconfig  11
-rw-r--r--  drivers/usb/misc/Makefile  1
-rw-r--r--  drivers/usb/misc/isight_firmware.c  131
-rw-r--r--  drivers/usb/serial/ftdi_sio.c  264
-rw-r--r--  drivers/usb/serial/ftdi_sio.h  267
-rw-r--r--  drivers/usb/serial/option.c  34
-rw-r--r--  drivers/usb/serial/pl2303.c  1
-rw-r--r--  drivers/usb/serial/pl2303.h  1
-rw-r--r--  drivers/usb/storage/unusual_devs.h  10
-rw-r--r--  drivers/watchdog/geodewdt.c  3
-rw-r--r--  include/asm-ia64/patch.h  1
-rw-r--r--  include/asm-ia64/ptrace.h  2
-rw-r--r--  include/asm-ia64/sections.h  1
-rw-r--r--  include/asm-s390/types.h  6
-rw-r--r--  include/linux/device.h  3
-rw-r--r--  include/linux/input.h  4
-rw-r--r--  include/linux/sched.h  1
-rw-r--r--  include/linux/topology.h  4
-rw-r--r--  include/linux/wm97xx.h  1
-rw-r--r--  kernel/sched.c  447
-rw-r--r--  kernel/sched_clock.c  18
-rw-r--r--  kernel/sched_debug.c  5
-rw-r--r--  kernel/sched_fair.c  254
-rw-r--r--  kernel/sched_rt.c  4
-rw-r--r--  kernel/sched_stats.h  1
-rw-r--r--  net/bluetooth/rfcomm/core.c  2
-rw-r--r--  net/dccp/ccids/ccid3.c  13
-rw-r--r--  net/dccp/ipv4.c  4
-rw-r--r--  net/llc/llc_sap.c  10
-rw-r--r--  net/mac80211/cfg.c  4
-rw-r--r--  net/mac80211/main.c  2
-rw-r--r--  net/mac80211/mlme.c  29
-rw-r--r--  net/mac80211/rx.c  4
-rw-r--r--  net/mac80211/util.c  4
-rw-r--r--  net/mac80211/wext.c  11
-rw-r--r--  net/netfilter/nf_conntrack_expect.c  4
94 files changed, 1502 insertions(+), 938 deletions(-)
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6678c49daba3..80b44ea052d7 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1076,48 +1076,6 @@ END(ia64_syscall_setup)
DBG_FAULT(15)
FAULT(15)
- /*
- * Squatting in this space ...
- *
- * This special case dispatcher for illegal operation faults allows preserved
- * registers to be modified through a callback function (asm only) that is handed
- * back from the fault handler in r8. Up to three arguments can be passed to the
- * callback function by returning an aggregate with the callback as its first
- * element, followed by the arguments.
- */
-ENTRY(dispatch_illegal_op_fault)
- .prologue
- .body
- SAVE_MIN_WITH_COVER
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
- mov out0=ar.ec
- ;;
- SAVE_REST
- PT_REGS_UNWIND_INFO(0)
- ;;
- br.call.sptk.many rp=ia64_illegal_op_fault
-.ret0: ;;
- alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
- mov out0=r9
- mov out1=r10
- mov out2=r11
- movl r15=ia64_leave_kernel
- ;;
- mov rp=r15
- mov b6=r8
- ;;
- cmp.ne p6,p0=0,r8
-(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
- br.sptk.many ia64_leave_kernel
-END(dispatch_illegal_op_fault)
-
.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -1715,6 +1673,48 @@ END(ia32_interrupt)
DBG_FAULT(67)
FAULT(67)
+ /*
+ * Squatting in this space ...
+ *
+ * This special case dispatcher for illegal operation faults allows preserved
+ * registers to be modified through a callback function (asm only) that is handed
+ * back from the fault handler in r8. Up to three arguments can be passed to the
+ * callback function by returning an aggregate with the callback as its first
+ * element, followed by the arguments.
+ */
+ENTRY(dispatch_illegal_op_fault)
+ .prologue
+ .body
+ SAVE_MIN_WITH_COVER
+ ssm psr.ic | PSR_DEFAULT_BITS
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+(p15) ssm psr.i // restore psr.i
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
+ mov out0=ar.ec
+ ;;
+ SAVE_REST
+ PT_REGS_UNWIND_INFO(0)
+ ;;
+ br.call.sptk.many rp=ia64_illegal_op_fault
+.ret0: ;;
+ alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
+ mov out0=r9
+ mov out1=r10
+ mov out2=r11
+ movl r15=ia64_leave_kernel
+ ;;
+ mov rp=r15
+ mov b6=r8
+ ;;
+ cmp.ne p6,p0=0,r8
+(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
+ br.sptk.many ia64_leave_kernel
+END(dispatch_illegal_op_fault)
+
#ifdef CONFIG_IA32_SUPPORT
/*
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 7c548ac52bbc..74b6d670aaef 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -15,6 +15,9 @@
#define ACCOUNT_SYS_ENTER
#endif
+.section ".data.patch.rse", "a"
+.previous
+
/*
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
@@ -40,7 +43,7 @@
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \
mov r16=IA64_KR(CURRENT); /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
@@ -87,6 +90,7 @@
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
+ WORKAROUND; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
@@ -206,6 +210,40 @@
st8 [r25]=r10; /* ar.ssd */ \
;;
-#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
+#define RSE_WORKAROUND \
+(pUStk) extr.u r17=r18,3,6; \
+(pUStk) sub r16=r18,r22; \
+[1:](pKStk) br.cond.sptk.many 1f; \
+ .xdata4 ".data.patch.rse",1b-. \
+ ;; \
+ cmp.ge p6,p7 = 33,r17; \
+ ;; \
+(p6) mov r17=0x310; \
+(p7) mov r17=0x308; \
+ ;; \
+ cmp.leu p1,p0=r16,r17; \
+(p1) br.cond.sptk.many 1f; \
+ dep.z r17=r26,0,62; \
+ movl r16=2f; \
+ ;; \
+ mov ar.pfs=r17; \
+ dep r27=r0,r27,16,14; \
+ mov b0=r16; \
+ ;; \
+ br.ret.sptk b0; \
+ ;; \
+2: \
+ mov ar.rsc=r0 \
+ ;; \
+ flushrs; \
+ ;; \
+ mov ar.bspstore=r22 \
+ ;; \
+ mov r18=ar.bsp; \
+ ;; \
+1: \
+ .pred.rel "mutex", pKStk, pUStk
+
+#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
+#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index e0dca8743dbb..b83b2c516008 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,6 +115,29 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
ia64_srlz_i();
}
+/*
+ * Disable the RSE workaround by turning the conditional branch
+ * that we tagged in each place the workaround was used into an
+ * unconditional branch.
+ */
+void __init
+ia64_patch_rse (unsigned long start, unsigned long end)
+{
+ s32 *offp = (s32 *) start;
+ u64 ip, *b;
+
+ while (offp < (s32 *) end) {
+ ip = (u64) offp + *offp;
+
+ b = (u64 *)(ip & -16);
+ b[1] &= ~0xf800000L;
+ ia64_fc((void *) ip);
+ ++offp;
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
void __init
ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
{
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e9596cd0cdab..f48a809c686d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -560,6 +560,17 @@ setup_arch (char **cmdline_p)
/* process SAL system table: */
ia64_sal_init(__va(efi.sal_systab));
+#ifdef CONFIG_ITANIUM
+ ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+#else
+ {
+ u64 num_phys_stacked;
+
+ if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
+ ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+ }
+#endif
+
#ifdef CONFIG_SMP
cpu_physical_id(0) = hard_smp_processor_id();
#endif
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 80622acc95de..5929ab10a289 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -156,6 +156,13 @@ SECTIONS
__end___vtop_patchlist = .;
}
+ .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+ {
+ __start___rse_patchlist = .;
+ *(.data.patch.rse)
+ __end___rse_patchlist = .;
+ }
+
.data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1d035082e78e..93acb3c1859d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -308,6 +308,9 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool y
+
source "mm/Kconfig"
comment "I/O subsystem configuration"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 655d52543e2d..ad40729bec3d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -130,6 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
P_DEBUG(" -= Work Queue =-\n");
i = 0;
+ get_online_cpus();
spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list);
@@ -140,6 +141,7 @@ static void appldata_work_fn(struct work_struct *work)
}
}
spin_unlock(&appldata_ops_lock);
+ put_online_cpus();
}
/*
@@ -266,12 +268,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
len = *lenp;
if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
+ get_online_cpus();
spin_lock(&appldata_timer_lock);
if (buf[0] == '1')
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
else if (buf[0] == '0')
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
+ put_online_cpus();
out:
*lenp = len;
*ppos += len;
@@ -314,10 +318,12 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
return -EINVAL;
}
+ get_online_cpus();
spin_lock(&appldata_timer_lock);
appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
+ put_online_cpus();
P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
interval);
@@ -556,8 +562,10 @@ static int __init appldata_init(void)
return -ENOMEM;
}
+ get_online_cpus();
for_each_online_cpu(i)
appldata_online_cpu(i);
+ put_online_cpus();
/* Register cpu hotplug notifier */
register_hotcpu_notifier(&appldata_nb);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index aa341d0ea1e6..c5cdb975d590 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Wed Apr 30 11:07:45 2008
+# Linux kernel version: 2.6.26-rc4
+# Fri May 30 09:49:33 2008
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y
@@ -103,6 +103,7 @@ CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_MODVERSIONS=y
@@ -173,6 +174,7 @@ CONFIG_PREEMPT=y
# CONFIG_PREEMPT_RCU is not set
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -210,6 +212,7 @@ CONFIG_FORCE_MAX_ZONEORDER=9
CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
# CONFIG_CMM is not set
+# CONFIG_PAGE_STATES is not set
CONFIG_VIRT_TIMER=y
CONFIG_VIRT_CPU_ACCOUNTING=y
# CONFIG_APPLDATA_BASE is not set
@@ -620,6 +623,7 @@ CONFIG_S390_VMUR=m
#
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
+CONFIG_ACCESSIBILITY=y
#
# File systems
@@ -754,11 +758,12 @@ CONFIG_FRAME_WARN=2048
CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
+# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_RT_MUTEXES is not set
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c14a336f6300..d2f270c995d9 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -208,7 +208,7 @@ static const unsigned char formats[][7] = {
[INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */
[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
- [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */
+ [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */
[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */
[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */
[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1f4228948dc4..42b1d12ebb10 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1089,7 +1089,7 @@ out:
#ifdef CONFIG_HOTPLUG_CPU
-int smp_rescan_cpus(void)
+int __ref smp_rescan_cpus(void)
{
cpumask_t newcpus;
int cpu;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 29f3a63806b9..05598649b326 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
void show_mem(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ unsigned long i, total = 0, reserved = 0;
+ unsigned long shared = 0, cached = 0;
+ unsigned long flags;
struct page *page;
+ pg_data_t *pgdat;
printk("Mem-info:\n");
show_free_areas();
- i = max_mapnr;
- while (i-- > 0) {
- if (!pfn_valid(i))
- continue;
- page = pfn_to_page(i);
- total++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (page_count(page))
- shared += page_count(page) - 1;
+ for_each_online_pgdat(pgdat) {
+ pgdat_resize_lock(pgdat, &flags);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ if (!pfn_valid(pgdat->node_start_pfn + i))
+ continue;
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page) - 1;
+ }
+ pgdat_resize_unlock(pgdat, &flags);
}
- printk("%d pages of RAM\n", total);
- printk("%d reserved pages\n", reserved);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
-
- printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
- printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
- printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
- printk("%lu pages slab\n",
- global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE));
- printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+ printk("%ld pages of RAM\n", total);
+ printk("%ld reserved pages\n", reserved);
+ printk("%ld pages shared\n", shared);
+ printk("%ld pages swap cached\n", cached);
}
/*
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea2804808f39..f591188fa2c0 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,12 +27,19 @@ struct memory_segment {
static LIST_HEAD(mem_segs);
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+ if (slab_is_available())
+ return (void *)__get_free_pages(GFP_KERNEL, order);
+ return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
{
pud_t *pud = NULL;
#ifdef CONFIG_64BIT
- pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+ pud = vmem_alloc_pages(2);
if (!pud)
return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
return pud;
}
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
#ifdef CONFIG_64BIT
- pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+ pmd = vmem_alloc_pages(2);
if (!pmd)
return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
if (pte_none(*pt_dir)) {
unsigned long new_page;
- new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+ new_page =__pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte;
}
}
+ memset(start, 0, nr * sizeof(struct page));
ret = 0;
out:
flush_tlb_kernel_range(start_addr, end_addr);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 72eccae4904b..422cfcad486d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -760,6 +760,21 @@ static void device_remove_class_symlinks(struct device *dev)
}
/**
+ * dev_set_name - set a device name
+ * @dev: device
+ */
+int dev_set_name(struct device *dev, const char *fmt, ...)
+{
+ va_list vargs;
+
+ va_start(vargs, fmt);
+ vsnprintf(dev->bus_id, sizeof(dev->bus_id), fmt, vargs);
+ va_end(vargs);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_set_name);
+
+/**
* device_add - add device to device hierarchy.
* @dev: device.
*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7fce038fa57e..86f0a2430624 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -928,13 +928,13 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
- unlock_policy_rwsem_write(cpu);
-
if (ret) {
dprintk("setting policy failed\n");
goto err_out_unregister;
}
+ unlock_policy_rwsem_write(cpu);
+
kobject_uevent(&policy->kobj, KOBJ_ADD);
module_put(cpufreq_driver->owner);
dprintk("initialization complete\n");
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 4a95adc4cc78..af58a6f1e898 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -807,6 +807,8 @@ static int atkbd_activate(struct atkbd *atkbd)
static void atkbd_cleanup(struct serio *serio)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
+
+ atkbd_disable(atkbd);
ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_BAT);
}
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 3dea0c5077a9..45767e73f071 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -136,6 +136,9 @@ static void pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
set_bit(code, input_dev->keybit);
}
+ for (i = 0; i < pdata->direct_key_num; i++)
+ set_bit(pdata->direct_key_map[i], input_dev->keybit);
+
keypad->rotary_up_key[0] = pdata->rotary0_up_key;
keypad->rotary_up_key[1] = pdata->rotary1_up_key;
keypad->rotary_down_key[0] = pdata->rotary0_down_key;
@@ -143,17 +146,21 @@ static void pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
keypad->rotary_rel_code[0] = pdata->rotary0_rel_code;
keypad->rotary_rel_code[1] = pdata->rotary1_rel_code;
- if (pdata->rotary0_up_key && pdata->rotary0_down_key) {
- set_bit(pdata->rotary0_up_key, input_dev->keybit);
- set_bit(pdata->rotary0_down_key, input_dev->keybit);
- } else
- set_bit(pdata->rotary0_rel_code, input_dev->relbit);
-
- if (pdata->rotary1_up_key && pdata->rotary1_down_key) {
- set_bit(pdata->rotary1_up_key, input_dev->keybit);
- set_bit(pdata->rotary1_down_key, input_dev->keybit);
- } else
- set_bit(pdata->rotary1_rel_code, input_dev->relbit);
+ if (pdata->enable_rotary0) {
+ if (pdata->rotary0_up_key && pdata->rotary0_down_key) {
+ set_bit(pdata->rotary0_up_key, input_dev->keybit);
+ set_bit(pdata->rotary0_down_key, input_dev->keybit);
+ } else
+ set_bit(pdata->rotary0_rel_code, input_dev->relbit);
+ }
+
+ if (pdata->enable_rotary1) {
+ if (pdata->rotary1_up_key && pdata->rotary1_down_key) {
+ set_bit(pdata->rotary1_up_key, input_dev->keybit);
+ set_bit(pdata->rotary1_down_key, input_dev->keybit);
+ } else
+ set_bit(pdata->rotary1_rel_code, input_dev->relbit);
+ }
}
static inline unsigned int lookup_matrix_keycode(
@@ -484,8 +491,13 @@ static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
keypad->input_dev = input_dev;
input_set_drvdata(input_dev, keypad);
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_REL);
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ if ((keypad->pdata->enable_rotary0 &&
+ keypad->pdata->rotary0_rel_code) ||
+ (keypad->pdata->enable_rotary1 &&
+ keypad->pdata->rotary1_rel_code)) {
+ input_dev->evbit[0] |= BIT_MASK(EV_REL);
+ }
pxa27x_keypad_build_keycode(keypad);
platform_set_drvdata(pdev, keypad);
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 9531d8c7444f..d82f7f727f7a 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/input-polldev.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5ece9f56babc..9aafa96cb746 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -331,6 +331,13 @@ static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
},
},
{
+ .ident = "Acer TravelMate 660",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
+ },
+ },
+ {
.ident = "Acer TravelMate 2490",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 65a74cfc187b..592ff55b62d0 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -885,6 +885,20 @@ static long i8042_panic_blink(long count)
#undef DELAY
+#ifdef CONFIG_X86
+static void i8042_dritek_enable(void)
+{
+ char param = 0x90;
+ int error;
+
+ error = i8042_command(&param, 0x1059);
+ if (error)
+ printk(KERN_WARNING
+ "Failed to enable DRITEK extension: %d\n",
+ error);
+}
+#endif
+
#ifdef CONFIG_PM
/*
* Here we try to restore the original BIOS settings. We only want to
@@ -942,6 +956,12 @@ static int i8042_resume(struct platform_device *dev)
return -EIO;
}
+
+#ifdef CONFIG_X86
+ if (i8042_dritek)
+ i8042_dritek_enable();
+#endif
+
if (i8042_mux_present) {
if (i8042_set_mux_mode(1, NULL) || i8042_enable_mux_ports())
printk(KERN_WARNING
@@ -1160,6 +1180,11 @@ static int __devinit i8042_probe(struct platform_device *dev)
if (error)
return error;
+#ifdef CONFIG_X86
+ if (i8042_dritek)
+ i8042_dritek_enable();
+#endif
+
if (!i8042_noaux) {
error = i8042_setup_aux();
if (error && error != -ENODEV && error != -EBUSY)
@@ -1171,14 +1196,6 @@ static int __devinit i8042_probe(struct platform_device *dev)
if (error)
goto out_fail;
}
-#ifdef CONFIG_X86
- if (i8042_dritek) {
- char param = 0x90;
- error = i8042_command(&param, 0x1059);
- if (error)
- goto out_fail;
- }
-#endif
/*
* Ok, everything is ready, let's register all serio ports
*/
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index c5a8661a1baa..1e748e46d12e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -830,7 +830,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
struct gtco *gtco;
struct input_dev *input_dev;
struct hid_descriptor *hid_desc;
- char *report = NULL;
+ char *report;
int result = 0, retry;
int error;
struct usb_endpoint_descriptor *endpoint;
@@ -916,12 +916,16 @@ static int gtco_probe(struct usb_interface *usbinterface,
le16_to_cpu(hid_desc->wDescriptorLength),
5000); /* 5 secs */
- if (result == le16_to_cpu(hid_desc->wDescriptorLength))
+ dbg("usb_control_msg result: %d", result);
+ if (result == le16_to_cpu(hid_desc->wDescriptorLength)) {
+ parse_hid_report_descriptor(gtco, report, result);
break;
+ }
}
+ kfree(report);
+
/* If we didn't get the report, fail */
- dbg("usb_control_msg result: :%d", result);
if (result != le16_to_cpu(hid_desc->wDescriptorLength)) {
err("Failed to get HID Report Descriptor of size: %d",
hid_desc->wDescriptorLength);
@@ -929,12 +933,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
goto err_free_urb;
}
- /* Now we parse the report */
- parse_hid_report_descriptor(gtco, report, result);
-
- /* Now we delete it */
- kfree(report);
-
/* Create a device file node */
usb_make_path(gtco->usbdev, gtco->usbpath, sizeof(gtco->usbpath));
strlcat(gtco->usbpath, "/input0", sizeof(gtco->usbpath));
@@ -988,7 +986,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
usb_buffer_free(gtco->usbdev, REPORT_MAX_SIZE,
gtco->buffer, gtco->buf_dma);
err_free_devs:
- kfree(report);
input_free_device(input_dev);
kfree(gtco);
return error;
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index 01278bd7e65c..838458792ea0 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -85,6 +85,15 @@ module_param(delay, int, 0);
MODULE_PARM_DESC(delay, "Set adc sample delay.");
/*
+ * Set five_wire = 1 to use a 5 wire touchscreen.
+ *
+ * NOTE: Five wire mode does not allow for readback of pressure.
+ */
+static int five_wire;
+module_param(five_wire, int, 0);
+MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen.");
+
+/*
* Set adc mask function.
*
* Sources of glitch noise, such as signals driving an LCD display, may feed
@@ -162,6 +171,19 @@ static void wm9713_phy_init(struct wm97xx *wm)
64000 / rpu);
}
+ /* Five wire panel? */
+ if (five_wire) {
+ dig3 |= WM9713_45W;
+ dev_info(wm->dev, "setting 5-wire touchscreen mode.");
+
+ if (pil) {
+ dev_warn(wm->dev,
+ "Pressure measurement not supported in 5 "
+ "wire mode, disabling\n");
+ pil = 0;
+ }
+ }
+
/* touchpanel pressure */
if (pil == 2) {
dig3 |= WM9712_PIL;
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index e9c7ea46b6e3..cdc24ad314e0 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -608,6 +608,17 @@ static int wm97xx_probe(struct device *dev)
goto alloc_err;
}
+ /* set up physical characteristics */
+ wm->codec->phy_init(wm);
+
+ /* load gpio cache */
+ wm->gpio[0] = wm97xx_reg_read(wm, AC97_GPIO_CFG);
+ wm->gpio[1] = wm97xx_reg_read(wm, AC97_GPIO_POLARITY);
+ wm->gpio[2] = wm97xx_reg_read(wm, AC97_GPIO_STICKY);
+ wm->gpio[3] = wm97xx_reg_read(wm, AC97_GPIO_WAKEUP);
+ wm->gpio[4] = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
+ wm->gpio[5] = wm97xx_reg_read(wm, AC97_MISC_AFE);
+
wm->input_dev = input_allocate_device();
if (wm->input_dev == NULL) {
ret = -ENOMEM;
@@ -616,6 +627,7 @@ static int wm97xx_probe(struct device *dev)
/* set up touch configuration */
wm->input_dev->name = "wm97xx touchscreen";
+ wm->input_dev->phys = "wm97xx";
wm->input_dev->open = wm97xx_ts_input_open;
wm->input_dev->close = wm97xx_ts_input_close;
set_bit(EV_ABS, wm->input_dev->evbit);
@@ -634,17 +646,6 @@ static int wm97xx_probe(struct device *dev)
if (ret < 0)
goto dev_alloc_err;
- /* set up physical characteristics */
- wm->codec->phy_init(wm);
-
- /* load gpio cache */
- wm->gpio[0] = wm97xx_reg_read(wm, AC97_GPIO_CFG);
- wm->gpio[1] = wm97xx_reg_read(wm, AC97_GPIO_POLARITY);
- wm->gpio[2] = wm97xx_reg_read(wm, AC97_GPIO_STICKY);
- wm->gpio[3] = wm97xx_reg_read(wm, AC97_GPIO_WAKEUP);
- wm->gpio[4] = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
- wm->gpio[5] = wm97xx_reg_read(wm, AC97_MISC_AFE);
-
/* register our battery device */
wm->battery_dev = platform_device_alloc("wm97xx-battery", -1);
if (!wm->battery_dev) {
@@ -801,7 +802,7 @@ void wm97xx_unregister_mach_ops(struct wm97xx *wm)
EXPORT_SYMBOL_GPL(wm97xx_unregister_mach_ops);
static struct device_driver wm97xx_driver = {
- .name = "ac97",
+ .name = "wm97xx-ts",
.bus = &ac97_bus_type,
.owner = THIS_MODULE,
.probe = wm97xx_probe,
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 3f28f6eabdbf..a0ce0b2fa03e 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -3821,7 +3821,7 @@ TPACPI_HANDLE(led, ec, "SLED", /* 570 */
#define TPACPI_LED_NUMLEDS 8
static struct tpacpi_led_classdev *tpacpi_leds;
static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
-static const char const *tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
+static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
/* there's a limit of 19 chars + NULL before 2.6.26 */
"tpacpi::power",
"tpacpi:orange:batt",
@@ -3860,10 +3860,10 @@ static int led_get_status(unsigned int led)
static int led_set_status(unsigned int led, enum led_status_t ledstatus)
{
/* off, on, blink. Index is led_status_t */
- static const int const led_sled_arg1[] = { 0, 1, 3 };
- static const int const led_exp_hlbl[] = { 0, 0, 1 }; /* led# * */
- static const int const led_exp_hlcl[] = { 0, 1, 1 }; /* led# * */
- static const int const led_led_arg1[] = { 0, 0x80, 0xc0 };
+ static const int led_sled_arg1[] = { 0, 1, 3 };
+ static const int led_exp_hlbl[] = { 0, 0, 1 }; /* led# * */
+ static const int led_exp_hlcl[] = { 0, 1, 1 }; /* led# * */
+ static const int led_led_arg1[] = { 0, 0x80, 0xc0 };
int rc = 0;
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 37783cdd301a..dfa4bdd5597c 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -737,6 +737,7 @@ struct b43_wl {
struct ieee80211_tx_control beacon_txctl;
bool beacon0_uploaded;
bool beacon1_uploaded;
+ bool beacon_templates_virgin; /* Never wrote the templates? */
struct work_struct beacon_update_trigger;
/* The current QOS parameters for the 4 queues.
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 8fdba9415c04..6c3d9ea0a9f8 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1544,6 +1544,30 @@ static void b43_write_probe_resp_template(struct b43_wldev *dev,
kfree(probe_resp_data);
}
+static void b43_upload_beacon0(struct b43_wldev *dev)
+{
+ struct b43_wl *wl = dev->wl;
+
+ if (wl->beacon0_uploaded)
+ return;
+ b43_write_beacon_template(dev, 0x68, 0x18);
+ /* FIXME: Probe resp upload doesn't really belong here,
+ * but we don't use that feature anyway. */
+ b43_write_probe_resp_template(dev, 0x268, 0x4A,
+ &__b43_ratetable[3]);
+ wl->beacon0_uploaded = 1;
+}
+
+static void b43_upload_beacon1(struct b43_wldev *dev)
+{
+ struct b43_wl *wl = dev->wl;
+
+ if (wl->beacon1_uploaded)
+ return;
+ b43_write_beacon_template(dev, 0x468, 0x1A);
+ wl->beacon1_uploaded = 1;
+}
+
static void handle_irq_beacon(struct b43_wldev *dev)
{
struct b43_wl *wl = dev->wl;
@@ -1568,24 +1592,27 @@ static void handle_irq_beacon(struct b43_wldev *dev)
return;
}
- if (!beacon0_valid) {
- if (!wl->beacon0_uploaded) {
- b43_write_beacon_template(dev, 0x68, 0x18);
- b43_write_probe_resp_template(dev, 0x268, 0x4A,
- &__b43_ratetable[3]);
- wl->beacon0_uploaded = 1;
- }
+ if (unlikely(wl->beacon_templates_virgin)) {
+ /* We never uploaded a beacon before.
+ * Upload both templates now, but only mark one valid. */
+ wl->beacon_templates_virgin = 0;
+ b43_upload_beacon0(dev);
+ b43_upload_beacon1(dev);
cmd = b43_read32(dev, B43_MMIO_MACCMD);
cmd |= B43_MACCMD_BEACON0_VALID;
b43_write32(dev, B43_MMIO_MACCMD, cmd);
- } else if (!beacon1_valid) {
- if (!wl->beacon1_uploaded) {
- b43_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ } else {
+ if (!beacon0_valid) {
+ b43_upload_beacon0(dev);
+ cmd = b43_read32(dev, B43_MMIO_MACCMD);
+ cmd |= B43_MACCMD_BEACON0_VALID;
+ b43_write32(dev, B43_MMIO_MACCMD, cmd);
+ } else if (!beacon1_valid) {
+ b43_upload_beacon1(dev);
+ cmd = b43_read32(dev, B43_MMIO_MACCMD);
+ cmd |= B43_MACCMD_BEACON1_VALID;
+ b43_write32(dev, B43_MMIO_MACCMD, cmd);
}
- cmd = b43_read32(dev, B43_MMIO_MACCMD);
- cmd |= B43_MACCMD_BEACON1_VALID;
- b43_write32(dev, B43_MMIO_MACCMD, cmd);
}
}
@@ -4073,6 +4100,9 @@ static int b43_op_start(struct ieee80211_hw *hw)
wl->filter_flags = 0;
wl->radiotap_enabled = 0;
b43_qos_clear(wl);
+ wl->beacon0_uploaded = 0;
+ wl->beacon1_uploaded = 0;
+ wl->beacon_templates_virgin = 1;
/* First register RFkill.
* LEDs that are registered later depend on it. */
@@ -4241,7 +4271,9 @@ static void b43_chip_reset(struct work_struct *work)
goto out;
}
}
- out:
+out:
+ if (err)
+ wl->current_dev = NULL; /* Failed to init the dev. */
mutex_unlock(&wl->mutex);
if (err)
b43err(wl, "Controller restart FAILED\n");
@@ -4382,9 +4414,11 @@ static void b43_one_core_detach(struct ssb_device *dev)
struct b43_wldev *wldev;
struct b43_wl *wl;
+ /* Do not cancel ieee80211-workqueue based work here.
+ * See comment in b43_remove(). */
+
wldev = ssb_get_drvdata(dev);
wl = wldev->wl;
- cancel_work_sync(&wldev->restart_work);
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
@@ -4569,6 +4603,10 @@ static void b43_remove(struct ssb_device *dev)
struct b43_wl *wl = ssb_get_devtypedata(dev);
struct b43_wldev *wldev = ssb_get_drvdata(dev);
+ /* We must cancel any work here before unregistering from ieee80211,
+ * as the ieee80211 unreg will destroy the workqueue. */
+ cancel_work_sync(&wldev->restart_work);
+
B43_WARN_ON(!wl);
if (wl->current_dev == wldev)
ieee80211_unregister_hw(wl->hw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index c9847b1a67f7..3a7f0cb710ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -1162,7 +1162,6 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
/* Higher rate not available, use the original */
} else {
- new_rate = rate;
break;
}
}
@@ -2009,7 +2008,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
* 2) Not just finishing up a search
* 3) Allowing a new search
*/
- if (!update_lq && !done_search && !lq_sta->stay_in_tbl) {
+ if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) {
/* Save current throughput to compare with "search" throughput*/
lq_sta->last_tpt = current_tpt;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d0b1fb15c709..18c9931e3267 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -116,6 +116,7 @@ MODULE_PARM_DESC(workaround_interval,
#define OID_802_11_ENCRYPTION_STATUS ccpu2(0x0d01011b)
#define OID_802_11_ADD_KEY ccpu2(0x0d01011d)
#define OID_802_11_REMOVE_KEY ccpu2(0x0d01011e)
+#define OID_802_11_ASSOCIATION_INFORMATION ccpu2(0x0d01011f)
#define OID_802_11_PMKID ccpu2(0x0d010123)
#define OID_802_11_NETWORK_TYPES_SUPPORTED ccpu2(0x0d010203)
#define OID_802_11_NETWORK_TYPE_IN_USE ccpu2(0x0d010204)
@@ -271,6 +272,26 @@ struct ndis_config_param {
__le32 value_length;
} __attribute__((packed));
+struct ndis_80211_assoc_info {
+ __le32 length;
+ __le16 req_ies;
+ struct req_ie {
+ __le16 capa;
+ __le16 listen_interval;
+ u8 cur_ap_address[6];
+ } req_ie;
+ __le32 req_ie_length;
+ __le32 offset_req_ies;
+ __le16 resp_ies;
+ struct resp_ie {
+ __le16 capa;
+ __le16 status_code;
+ __le16 assoc_id;
+ } resp_ie;
+ __le32 resp_ie_length;
+ __le32 offset_resp_ies;
+} __attribute__((packed));
+
/* these have to match what is in wpa_supplicant */
enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP };
enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
@@ -674,6 +695,12 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
return ret;
}
+static int get_association_info(struct usbnet *usbdev,
+ struct ndis_80211_assoc_info *info, int len)
+{
+ return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION,
+ info, &len);
+}
static int is_associated(struct usbnet *usbdev)
{
@@ -2182,11 +2209,40 @@ static void rndis_wext_worker(struct work_struct *work)
struct usbnet *usbdev = priv->usbdev;
union iwreq_data evt;
unsigned char bssid[ETH_ALEN];
- int ret;
+ struct ndis_80211_assoc_info *info;
+ int assoc_size = sizeof(*info) + IW_CUSTOM_MAX + 32;
+ int ret, offset;
if (test_and_clear_bit(WORK_CONNECTION_EVENT, &priv->work_pending)) {
- ret = get_bssid(usbdev, bssid);
+ info = kzalloc(assoc_size, GFP_KERNEL);
+ if (!info)
+ goto get_bssid;
+
+ /* Get association info IEs from device and send them back to
+ * userspace. */
+ ret = get_association_info(usbdev, info, assoc_size);
+ if (!ret) {
+ evt.data.length = le32_to_cpu(info->req_ie_length);
+ if (evt.data.length > 0) {
+ offset = le32_to_cpu(info->offset_req_ies);
+ wireless_send_event(usbdev->net,
+ IWEVASSOCREQIE, &evt,
+ (char *)info + offset);
+ }
+
+ evt.data.length = le32_to_cpu(info->resp_ie_length);
+ if (evt.data.length > 0) {
+ offset = le32_to_cpu(info->offset_resp_ies);
+ wireless_send_event(usbdev->net,
+ IWEVASSOCRESPIE, &evt,
+ (char *)info + offset);
+ }
+ }
+
+ kfree(info);
+get_bssid:
+ ret = get_bssid(usbdev, bssid);
if (!ret) {
evt.data.flags = 0;
evt.data.length = 0;
@@ -2414,6 +2470,11 @@ static int bcm4320_early_init(struct usbnet *dev)
else if (priv->param_power_save > 2)
priv->param_power_save = 2;
+ if (priv->param_power_output < 0)
+ priv->param_power_output = 0;
+ else if (priv->param_power_output > 3)
+ priv->param_power_output = 3;
+
if (priv->param_roamtrigger < -80)
priv->param_roamtrigger = -80;
else if (priv->param_roamtrigger > -60)
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 57bdc153952f..611d98320593 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -328,6 +328,11 @@ static inline int rt2x00_get_link_ant_rssi(struct link *link)
return DEFAULT_RSSI;
}
+static inline void rt2x00_reset_link_ant_rssi(struct link *link)
+{
+ link->ant.rssi_ant = 0;
+}
+
static inline int rt2x00_get_link_ant_rssi_history(struct link *link,
enum antenna ant)
{
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index a9930a03f450..48608e8cc8b4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -129,6 +129,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
*/
rt2x00dev->ops->lib->config(rt2x00dev, &libconf, CONFIG_UPDATE_ANTENNA);
rt2x00lib_reset_link_tuner(rt2x00dev);
+ rt2x00_reset_link_ant_rssi(&rt2x00dev->link);
rt2x00dev->link.ant.active.rx = libconf.ant.rx;
rt2x00dev->link.ant.active.tx = libconf.ant.tx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index b22c02737185..2673d568bcac 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -483,9 +483,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
return;
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_beacondone_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_beacondone_iter,
+ rt2x00dev);
queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
}
@@ -507,7 +507,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* Update TX statistics.
*/
rt2x00dev->link.qual.tx_success += success;
- rt2x00dev->link.qual.tx_failed += txdesc->retry + fail;
+ rt2x00dev->link.qual.tx_failed += fail;
/*
* Initialize TX status
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index c206b5092070..87e280a21971 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -93,6 +93,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) {
ieee80211_stop_queues(hw);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/wireless/rtl8180_grf5101.c b/drivers/net/wireless/rtl8180_grf5101.c
index 5d47935dbac3..947ee55f18b2 100644
--- a/drivers/net/wireless/rtl8180_grf5101.c
+++ b/drivers/net/wireless/rtl8180_grf5101.c
@@ -88,7 +88,7 @@ static void grf5101_rf_set_channel(struct ieee80211_hw *dev,
write_grf5101(dev, 0x0B, chan);
write_grf5101(dev, 0x07, 0x1000);
- grf5101_write_phy_antenna(dev, chan);
+ grf5101_write_phy_antenna(dev, channel);
}
static void grf5101_rf_stop(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/rtl8180_max2820.c b/drivers/net/wireless/rtl8180_max2820.c
index a34dfd382b6d..6c825fd7f3b6 100644
--- a/drivers/net/wireless/rtl8180_max2820.c
+++ b/drivers/net/wireless/rtl8180_max2820.c
@@ -78,7 +78,8 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev,
struct ieee80211_conf *conf)
{
struct rtl8180_priv *priv = dev->priv;
- int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+ int channel = conf ?
+ ieee80211_frequency_to_channel(conf->channel->center_freq) : 1;
unsigned int chan_idx = channel - 1;
u32 txpw = priv->channels[chan_idx].hw_value & 0xFF;
u32 chan = max2820_chan[chan_idx];
@@ -87,7 +88,7 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev,
* sa2400, for MAXIM we do this directly from BB */
rtl8180_write_phy(dev, 3, txpw);
- max2820_write_phy_antenna(dev, chan);
+ max2820_write_phy_antenna(dev, channel);
write_max2820(dev, 3, chan);
}
diff --git a/drivers/net/wireless/rtl8180_sa2400.c b/drivers/net/wireless/rtl8180_sa2400.c
index 0311b4ea124c..cea4e0ccb92d 100644
--- a/drivers/net/wireless/rtl8180_sa2400.c
+++ b/drivers/net/wireless/rtl8180_sa2400.c
@@ -86,7 +86,7 @@ static void sa2400_rf_set_channel(struct ieee80211_hw *dev,
write_sa2400(dev, 7, txpw);
- sa2400_write_phy_antenna(dev, chan);
+ sa2400_write_phy_antenna(dev, channel);
write_sa2400(dev, 0, chan);
write_sa2400(dev, 1, 0xbb50);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8ba3f135da22..1a4025683362 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -63,6 +63,7 @@ static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
*/
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
+static wait_queue_head_t generic_waitq;
/*
* Allocate memory for a new device structure.
@@ -1151,11 +1152,15 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
struct dasd_block *block;
+ void (*callback)(struct dasd_ccw_req *, void *data);
+ void *callback_data;
list_for_each_safe(l, n, final_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
list_del_init(&cqr->devlist);
block = cqr->block;
+ callback = cqr->callback;
+ callback_data = cqr->callback_data;
if (block)
spin_lock_bh(&block->queue_lock);
switch (cqr->status) {
@@ -1176,7 +1181,7 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
BUG();
}
if (cqr->callback != NULL)
- (cqr->callback)(cqr, cqr->callback_data);
+ (callback)(cqr, callback_data);
if (block)
spin_unlock_bh(&block->queue_lock);
}
@@ -1406,17 +1411,15 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
*/
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
- wait_queue_head_t wait_q;
struct dasd_device *device;
int rc;
device = cqr->startdev;
- init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &wait_q;
+ cqr->callback_data = (void *) &generic_waitq;
dasd_add_request_tail(cqr);
- wait_event(wait_q, _wait_for_wakeup(cqr));
+ wait_event(generic_waitq, _wait_for_wakeup(cqr));
/* Request status is either done or failed. */
rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -1429,20 +1432,18 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
*/
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
- wait_queue_head_t wait_q;
struct dasd_device *device;
int rc;
device = cqr->startdev;
- init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &wait_q;
+ cqr->callback_data = (void *) &generic_waitq;
dasd_add_request_tail(cqr);
- rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
+ rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
if (rc == -ERESTARTSYS) {
dasd_cancel_req(cqr);
/* wait (non-interruptible) for final status */
- wait_event(wait_q, _wait_for_wakeup(cqr));
+ wait_event(generic_waitq, _wait_for_wakeup(cqr));
}
rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
return rc;
@@ -1466,7 +1467,6 @@ static inline int _dasd_term_running_cqr(struct dasd_device *device)
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
- wait_queue_head_t wait_q;
struct dasd_device *device;
int rc;
@@ -1478,9 +1478,8 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
return rc;
}
- init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &wait_q;
+ cqr->callback_data = (void *) &generic_waitq;
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue);
@@ -1489,7 +1488,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
spin_unlock_irq(get_ccwdev_lock(device->cdev));
- wait_event(wait_q, _wait_for_wakeup(cqr));
+ wait_event(generic_waitq, _wait_for_wakeup(cqr));
/* Request status is either done or failed. */
rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -2430,6 +2429,7 @@ static int __init dasd_init(void)
init_waitqueue_head(&dasd_init_waitq);
init_waitqueue_head(&dasd_flush_wq);
+ init_waitqueue_head(&generic_waitq);
/* register 'common' DASD debug area, used for all DBF_XXX calls */
dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 0d98f1ff2edd..848ef7e8523f 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -549,7 +549,6 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
struct raw3270_request *rq)
{
unsigned long flags;
- wait_queue_head_t wq;
int rc;
#ifdef CONFIG_TN3270_CONSOLE
@@ -566,20 +565,20 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
return rq->rc;
}
#endif
- init_waitqueue_head(&wq);
rq->callback = raw3270_wake_init;
- rq->callback_data = &wq;
+ rq->callback_data = &raw3270_wait_queue;
spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
rc = __raw3270_start(rp, view, rq);
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
if (rc)
return rc;
/* Now wait for the completion. */
- rc = wait_event_interruptible(wq, raw3270_request_final(rq));
+ rc = wait_event_interruptible(raw3270_wait_queue,
+ raw3270_request_final(rq));
if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
raw3270_halt_io(view->dev, rq);
/* No wait for the halt to complete. */
- wait_event(wq, raw3270_request_final(rq));
+ wait_event(raw3270_wait_queue, raw3270_request_final(rq));
return -ERESTARTSYS;
}
return rq->rc;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 9e784d5f7f57..ad05a87bc480 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
put_online_cpus();
}
-static void sclp_cpu_change_notify(struct work_struct *work)
+static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
smp_rescan_cpus();
}
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 35707c04e613..62576af36f47 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -71,9 +71,6 @@ static struct list_head sclp_vt220_outqueue;
/* Number of requests in outqueue */
static int sclp_vt220_outqueue_count;
-/* Wait queue used to delay write requests while we've run out of buffers */
-static wait_queue_head_t sclp_vt220_waitq;
-
/* Timer used for delaying write requests to merge subsequent messages into
* a single buffer */
static struct timer_list sclp_vt220_timer;
@@ -133,7 +130,6 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
} while (request && __sclp_vt220_emit(request));
if (request == NULL && sclp_vt220_flush_later)
sclp_vt220_emit_current();
- wake_up(&sclp_vt220_waitq);
/* Check if the tty needs a wake up call */
if (sclp_vt220_tty != NULL) {
tty_wakeup(sclp_vt220_tty);
@@ -383,7 +379,7 @@ sclp_vt220_timeout(unsigned long data)
*/
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
- int convertlf, int may_schedule)
+ int convertlf, int may_fail)
{
unsigned long flags;
void *page;
@@ -395,15 +391,14 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
overall_written = 0;
spin_lock_irqsave(&sclp_vt220_lock, flags);
do {
- /* Create a sclp output buffer if none exists yet */
+ /* Create an sclp output buffer if none exists yet */
if (sclp_vt220_current_request == NULL) {
while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- if (in_interrupt() || !may_schedule)
- sclp_sync_wait();
+ if (may_fail)
+ goto out;
else
- wait_event(sclp_vt220_waitq,
- !list_empty(&sclp_vt220_empty));
+ sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
}
page = (void *) sclp_vt220_empty.next;
@@ -437,6 +432,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
add_timer(&sclp_vt220_timer);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+out:
return overall_written;
}
@@ -520,19 +516,11 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
* character to the tty device. If the kernel uses this routine,
* it must call the flush_chars() routine (if defined) when it is
* done stuffing characters into the driver.
- *
- * NOTE: include/linux/tty_driver.h specifies that a character should be
- * ignored if there is no room in the queue. This driver implements a different
- * semantic in that it will block when there is no more room left.
- *
- * FIXME: putchar can currently be called from BH and other non blocking
- * handlers so this semantic isn't a good idea.
*/
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
- __sclp_vt220_write(&ch, 1, 0, 0, 1);
- return 1;
+ return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}
/*
@@ -653,7 +641,6 @@ static int __init __sclp_vt220_init(void)
spin_lock_init(&sclp_vt220_lock);
INIT_LIST_HEAD(&sclp_vt220_empty);
INIT_LIST_HEAD(&sclp_vt220_outqueue);
- init_waitqueue_head(&sclp_vt220_waitq);
init_timer(&sclp_vt220_timer);
sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index dddf8d62c153..d0d565a05dfe 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -231,6 +231,9 @@ struct tape_device {
/* Request queue. */
struct list_head req_queue;
+ /* Request wait queue. */
+ wait_queue_head_t wait_queue;
+
/* Each tape device has (currently) two minor numbers. */
int first_minor;
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ddc4a114e7f4..95da72bc17e8 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -179,11 +179,11 @@ tapeblock_requeue(struct work_struct *work) {
tapeblock_end_request(req, -EIO);
continue;
}
+ blkdev_dequeue_request(req);
+ nr_queued++;
spin_unlock_irq(&device->blk_data.request_queue_lock);
rc = tapeblock_start_request(device, req);
spin_lock_irq(&device->blk_data.request_queue_lock);
- blkdev_dequeue_request(req);
- nr_queued++;
}
spin_unlock_irq(&device->blk_data.request_queue_lock);
atomic_set(&device->blk_data.requeue_scheduled, 0);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 76e44eb7c47f..c20e3c548343 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -449,6 +449,7 @@ tape_alloc_device(void)
INIT_LIST_HEAD(&device->req_queue);
INIT_LIST_HEAD(&device->node);
init_waitqueue_head(&device->state_change_wq);
+ init_waitqueue_head(&device->wait_queue);
device->tape_state = TS_INIT;
device->medium_state = MS_UNKNOWN;
*device->modeset_byte = 0;
@@ -954,21 +955,19 @@ __tape_wake_up(struct tape_request *request, void *data)
int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
- wait_queue_head_t wq;
int rc;
- init_waitqueue_head(&wq);
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */
request->callback = __tape_wake_up;
- request->callback_data = &wq;
+ request->callback_data = &device->wait_queue;
/* Add request to request queue and try to start it. */
rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc)
return rc;
/* Request added to the queue. Wait for its completion. */
- wait_event(wq, (request->callback == NULL));
+ wait_event(device->wait_queue, (request->callback == NULL));
/* Get rc from request */
return request->rc;
}
@@ -989,20 +988,19 @@ int
tape_do_io_interruptible(struct tape_device *device,
struct tape_request *request)
{
- wait_queue_head_t wq;
int rc;
- init_waitqueue_head(&wq);
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */
request->callback = __tape_wake_up_interruptible;
- request->callback_data = &wq;
+ request->callback_data = &device->wait_queue;
rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc)
return rc;
/* Request added to the queue. Wait for its completion. */
- rc = wait_event_interruptible(wq, (request->callback == NULL));
+ rc = wait_event_interruptible(device->wait_queue,
+ (request->callback == NULL));
if (rc != -ERESTARTSYS)
/* Request finished normally. */
return request->rc;
@@ -1015,7 +1013,7 @@ tape_do_io_interruptible(struct tape_device *device,
/* Wait for the interrupt that acknowledges the halt. */
do {
rc = wait_event_interruptible(
- wq,
+ device->wait_queue,
(request->callback == NULL)
);
} while (rc == -ERESTARTSYS);
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index c1cb94e9f242..7e912f21fd36 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -155,9 +155,6 @@ static int generic_probe(struct usb_device *udev)
{
int err, c;
- /* put device-specific files into sysfs */
- usb_create_sysfs_dev_files(udev);
-
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
@@ -189,8 +186,6 @@ static void generic_disconnect(struct usb_device *udev)
* unconfigure the device */
if (udev->actconfig)
usb_set_configuration(udev, -1);
-
- usb_remove_sysfs_dev_files(udev);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 1e4b81e9eb50..a0bf5df6cb6f 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -213,6 +213,8 @@ struct hc_driver {
/* force handover of high-speed port to full-speed companion */
void (*relinquish_port)(struct usb_hcd *, int);
+ /* has a port been handed over to a companion? */
+ int (*port_handed_over)(struct usb_hcd *, int);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index eb57fcc701d7..8eb4da332f56 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1326,6 +1326,12 @@ void usb_disconnect(struct usb_device **pdev)
usb_unlock_device(udev);
+ /* Remove the device-specific files from sysfs. This must be
+ * done with udev unlocked, because some of the attribute
+ * routines try to acquire the device lock.
+ */
+ usb_remove_sysfs_dev_files(udev);
+
/* Unregister the device. The device driver is responsible
* for removing the device files from usbfs and sysfs and for
* de-configuring the device.
@@ -1541,6 +1547,9 @@ int usb_new_device(struct usb_device *udev)
goto fail;
}
+ /* put device-specific files into sysfs */
+ usb_create_sysfs_dev_files(udev);
+
/* Tell the world! */
announce_device(udev);
return err;
@@ -2744,7 +2753,11 @@ loop:
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
}
- dev_err(hub_dev, "unable to enumerate USB device on port %d\n", port1);
+ if (hub->hdev->parent ||
+ !hcd->driver->port_handed_over ||
+ !(hcd->driver->port_handed_over)(hcd, port1))
+ dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
+ port1);
done:
hub_port_disable(hub, port1, 1);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 2e2019390290..3da1ab4b389d 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -47,6 +47,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Edirol SD-20 */
{ USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Avision AV600U */
+ { USB_DEVICE(0x0638, 0x0a13), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c783cb111847..5e1f5d55bf04 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -588,35 +588,33 @@ read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
container_of(kobj, struct device, kobj));
size_t nleft = count;
size_t srclen, n;
+ int cfgno;
+ void *src;
- usb_lock_device(udev);
-
- /* The binary attribute begins with the device descriptor */
- srclen = sizeof(struct usb_device_descriptor);
- if (off < srclen) {
- n = min_t(size_t, nleft, srclen - off);
- memcpy(buf, off + (char *) &udev->descriptor, n);
- nleft -= n;
- buf += n;
- off = 0;
- } else {
- off -= srclen;
- }
-
- /* Then follows the raw descriptor entry for the current
- * configuration (config plus subsidiary descriptors).
+ /* The binary attribute begins with the device descriptor.
+ * Following that are the raw descriptor entries for all the
+ * configurations (config plus subsidiary descriptors).
*/
- if (udev->actconfig) {
- int cfgno = udev->actconfig - udev->config;
-
- srclen = __le16_to_cpu(udev->actconfig->desc.wTotalLength);
+ for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations &&
+ nleft > 0; ++cfgno) {
+ if (cfgno < 0) {
+ src = &udev->descriptor;
+ srclen = sizeof(struct usb_device_descriptor);
+ } else {
+ src = udev->rawdescriptors[cfgno];
+ srclen = __le16_to_cpu(udev->config[cfgno].desc.
+ wTotalLength);
+ }
if (off < srclen) {
- n = min_t(size_t, nleft, srclen - off);
- memcpy(buf, off + udev->rawdescriptors[cfgno], n);
+ n = min(nleft, srclen - (size_t) off);
+ memcpy(buf, src + off, n);
nleft -= n;
+ buf += n;
+ off = 0;
+ } else {
+ off -= srclen;
}
}
- usb_unlock_device(udev);
return count - nleft;
}
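
The rewritten read_descriptors() above exposes one logical byte stream -- the device descriptor followed by the raw descriptors of every configuration -- and serves whatever (off, count) window sysfs requests, skipping whole segments that lie entirely before the offset. The same windowed-copy idiom, reduced to plain C with illustrative names:

#include <stddef.h>
#include <string.h>

struct segment {
	const void *data;
	size_t len;
};

/* Copy up to 'count' bytes starting 'off' bytes into the concatenation
 * of the segments; returns the number of bytes actually copied. */
static size_t read_window(const struct segment *seg, int nseg,
			  size_t off, char *buf, size_t count)
{
	size_t nleft = count;
	int i;

	for (i = 0; i < nseg && nleft > 0; i++) {
		size_t srclen = seg[i].len;

		if (off < srclen) {
			size_t n = nleft < srclen - off ? nleft : srclen - off;

			memcpy(buf, (const char *)seg[i].data + off, n);
			nleft -= n;
			buf += n;
			off = 0;	/* later segments are read from the start */
		} else {
			off -= srclen;	/* window begins in a later segment */
		}
	}
	return count - nleft;
}

For example, with two segments of 18 and 300 bytes, read_window(seg, 2, 10, buf, 64) copies the last 8 bytes of the first segment followed by the first 56 bytes of the second.
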
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index 651b82701394..18687543d7fa 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -1627,7 +1627,9 @@ static int reset_queues(struct fsl_udc *udc)
udc_reset_ep_queue(udc, pipe);
/* report disconnect; the driver is already quiesced */
+ spin_unlock(&udc->lock);
udc->driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
return 0;
}
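
The fsl_usb2_udc fix above drops udc->lock around the gadget driver's disconnect() callback, presumably because that callback may call back into the controller driver (to disable endpoints or dequeue requests) and would then try to take the same spinlock. The shape of the idiom, with hypothetical names:

#include <linux/spinlock.h>

struct demo_udc {
	spinlock_t lock;
	void (*disconnect)(struct demo_udc *udc);
};

/* Called with udc->lock held: do not keep holding your own lock across a
 * callback that may legitimately re-enter you and take that same lock. */
static void demo_report_disconnect(struct demo_udc *udc)
{
	spin_unlock(&udc->lock);
	udc->disconnect(udc);		/* may call back into this driver */
	spin_lock(&udc->lock);
}
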
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 8b5f991e949c..08a4335401a9 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -223,6 +223,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 6d9bed6c1f48..7370d6187c64 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -269,7 +269,7 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
if (retval)
return retval;
- ehci->is_tdi_rh_tt = 1;
+ hcd->has_tt = 1;
ehci->sbrn = 0x20;
@@ -295,10 +295,6 @@ static const struct hc_driver ehci_fsl_hc_driver = {
*/
.reset = ehci_fsl_setup,
.start = ehci_run,
-#ifdef CONFIG_PM
- .suspend = ehci_bus_suspend,
- .resume = ehci_bus_resume,
-#endif
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -322,6 +318,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
static int ehci_fsl_drv_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 382587c4457c..740835bb8575 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -609,7 +609,7 @@ static int ehci_hub_control (
}
break;
case USB_PORT_FEAT_C_SUSPEND:
- /* we auto-clear this feature */
+ clear_bit(wIndex, &ehci->port_c_suspend);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC (ehci->hcs_params))
@@ -688,7 +688,7 @@ static int ehci_hub_control (
/* resume completed? */
else if (time_after_eq(jiffies,
ehci->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ set_bit(wIndex, &ehci->port_c_suspend);
ehci->reset_done[wIndex] = 0;
/* stop resume signaling */
@@ -765,6 +765,8 @@ static int ehci_hub_control (
status |= 1 << USB_PORT_FEAT_RESET;
if (temp & PORT_POWER)
status |= 1 << USB_PORT_FEAT_POWER;
+ if (test_bit(wIndex, &ehci->port_c_suspend))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
#ifndef VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
@@ -875,3 +877,13 @@ static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
set_owner(ehci, --portnum, PORT_OWNER);
}
+static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 __iomem *reg;
+
+ if (ehci_is_TDI(ehci))
+ return 0;
+ reg = &ehci->regs->port_status[portnum - 1];
+ return ehci_readl(ehci, reg) & PORT_OWNER;
+}
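
Two things change above: GetPortStatus now derives USB_PORT_FEAT_C_SUSPEND from a software bitmask (set when a resume completes, cleared only by ClearPortFeature) instead of a bit that was previously auto-cleared, and the new ehci_port_handed_over() reports whether PORT_OWNER is set for the port, i.e. whether it currently belongs to the companion controller (always false on TDI-style controllers, which have none). A compact sketch of the bitmask bookkeeping, with illustrative demo_* names:

#include <linux/bitops.h>

struct demo_roothub {
	unsigned long port_c_suspend;	/* one bit per root-hub port */
};

/* resume finished: latch the change bit until the hub driver clears it */
static void demo_resume_complete(struct demo_roothub *rh, int port)
{
	set_bit(port, &rh->port_c_suspend);
}

/* GetPortStatus: report the latched change bit */
static int demo_c_suspend_pending(struct demo_roothub *rh, int port)
{
	return test_bit(port, &rh->port_c_suspend);
}

/* ClearPortFeature(USB_PORT_FEAT_C_SUSPEND) */
static void demo_clear_c_suspend(struct demo_roothub *rh, int port)
{
	clear_bit(port, &rh->port_c_suspend);
}
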
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 601c8795a854..9d042f220097 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -26,7 +26,7 @@ static int ixp4xx_ehci_init(struct usb_hcd *hcd)
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- ehci->is_tdi_rh_tt = 1;
+ hcd->has_tt = 1;
ehci_reset(ehci);
retval = ehci_init(hcd);
@@ -58,6 +58,8 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
static int ixp4xx_ehci_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 3adfda813a7b..ab625f0ba1d9 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -139,10 +139,6 @@ static const struct hc_driver ehci_orion_hc_driver = {
*/
.reset = ehci_orion_setup,
.start = ehci_run,
-#ifdef CONFIG_PM
- .suspend = ehci_bus_suspend,
- .resume = ehci_bus_resume,
-#endif
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -165,6 +161,8 @@ static const struct hc_driver ehci_orion_hc_driver = {
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
static void __init
@@ -250,7 +248,7 @@ static int __init ehci_orion_drv_probe(struct platform_device *pdev)
ehci->regs = hcd->regs + 0x100 +
HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- ehci->is_tdi_rh_tt = 1;
+ hcd->has_tt = 1;
ehci->sbrn = 0x20;
/*
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 5bb7f6bb13f3..c46a58f9181d 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -129,7 +129,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
switch (pdev->vendor) {
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
- ehci->is_tdi_rh_tt = 1;
hcd->has_tt = 1;
tdi_reset(ehci);
}
@@ -379,7 +378,8 @@ static const struct hc_driver ehci_pci_hc_driver = {
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ee305b1f99ff..b018deed2e8f 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -76,6 +76,8 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
diff --git a/drivers/usb/host/ehci-ppc-soc.c b/drivers/usb/host/ehci-ppc-soc.c
index 6c76036783a1..529590eb4037 100644
--- a/drivers/usb/host/ehci-ppc-soc.c
+++ b/drivers/usb/host/ehci-ppc-soc.c
@@ -163,6 +163,7 @@ static const struct hc_driver ehci_ppc_soc_hc_driver = {
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
static int ehci_hcd_ppc_soc_drv_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 69782221bcf3..37e6abeb794c 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -73,6 +73,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.bus_resume = ehci_bus_resume,
#endif
.relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
};
static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index be575e46eac3..b7853c8bac0f 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1349,18 +1349,27 @@ iso_stream_schedule (
/* when's the last uframe this urb could start? */
max = now + mod;
- /* typical case: reuse current schedule. stream is still active,
- * and no gaps from host falling behind (irq delays etc)
+ /* Typical case: reuse current schedule, stream is still active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc), but if there are we'll take the next
+ * slot in the schedule, implicitly assuming URB_ISO_ASAP.
*/
if (likely (!list_empty (&stream->td_list))) {
start = stream->next_uframe;
if (start < now)
start += mod;
- if (likely ((start + sched->span) < max))
- goto ready;
- /* else fell behind; someday, try to reschedule */
- status = -EL2NSYNC;
- goto fail;
+
+ /* Fell behind (by up to twice the slop amount)? */
+ if (start >= max - 2 * 8 * SCHEDULE_SLOP)
+ start += stream->interval * DIV_ROUND_UP(
+ max - start, stream->interval) - mod;
+
+ /* Tried to schedule too far into the future? */
+ if (unlikely((start + sched->span) >= max)) {
+ status = -EFBIG;
+ goto fail;
+ }
+ goto ready;
}
/* need to schedule; when's the next (u)frame we could start?
@@ -1613,6 +1622,9 @@ itd_complete (
} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
desc->status = 0;
desc->actual_length = EHCI_ITD_LENGTH (t);
+ } else {
+ /* URB was too late */
+ desc->status = -EXDEV;
}
}
@@ -2095,7 +2107,7 @@ done:
static void
scan_periodic (struct ehci_hcd *ehci)
{
- unsigned frame, clock, now_uframe, mod;
+ unsigned now_uframe, frame, clock, clock_frame, mod;
unsigned modified;
mod = ehci->periodic_size << 3;
@@ -2111,6 +2123,7 @@ scan_periodic (struct ehci_hcd *ehci)
else
clock = now_uframe + mod - 1;
clock %= mod;
+ clock_frame = clock >> 3;
for (;;) {
union ehci_shadow q, *q_p;
@@ -2157,22 +2170,26 @@ restart:
case Q_TYPE_ITD:
/* If this ITD is still active, leave it for
* later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
*/
- rmb ();
- for (uf = 0; uf < 8 && live; uf++) {
- if (0 == (q.itd->hw_transaction [uf]
- & ITD_ACTIVE(ehci)))
- continue;
- incomplete = true;
- q_p = &q.itd->itd_next;
- hw_p = &q.itd->hw_next;
- type = Q_NEXT_TYPE(ehci,
+ if (frame == clock_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(ehci))
+ break;
+ }
+ if (uf < 8) {
+ incomplete = true;
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
+ type = Q_NEXT_TYPE(ehci,
q.itd->hw_next);
- q = *q_p;
- break;
+ q = *q_p;
+ break;
+ }
}
- if (uf < 8 && live)
- break;
/* Take finished ITDs out of the schedule
* and process them: recycle, maybe report
@@ -2189,9 +2206,12 @@ restart:
case Q_TYPE_SITD:
/* If this SITD is still active, leave it for
* later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
*/
- if ((q.sitd->hw_results & SITD_ACTIVE(ehci))
- && live) {
+ if (frame == clock_frame && live &&
+ (q.sitd->hw_results &
+ SITD_ACTIVE(ehci))) {
incomplete = true;
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
@@ -2260,6 +2280,7 @@ restart:
/* rescan the rest of this frame, then ... */
clock = now;
+ clock_frame = clock >> 3;
} else {
now_uframe++;
now_uframe %= mod;
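
The interesting part of the iso_stream_schedule() change is the fallback for an active stream that has fallen behind: instead of failing with -EL2NSYNC, the code pushes the start slot forward by a whole number of polling intervals so it lands at or past the current uframe, and only fails (now with -EFBIG) if the result would run past the schedule horizon. The rounding step in isolation, with hypothetical numbers (the real code additionally wraps the result modulo the schedule size):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned start = 100;	/* stale start uframe of the stream */
	unsigned target = 137;	/* earliest uframe we may still use */
	unsigned interval = 8;	/* stream polls every 8 uframes */

	/* advance by whole intervals until we reach or pass the target */
	start += interval * DIV_ROUND_UP(target - start, interval);

	printf("rescheduled start = %u\n", start);	/* prints 140 */
	return 0;
}
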
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index bf92d209a1a9..35a03095757e 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -97,6 +97,8 @@ struct ehci_hcd { /* one per controller */
dedicated to the companion controller */
unsigned long owned_ports; /* which ports are
owned by the companion during a bus suspend */
+ unsigned long port_c_suspend; /* which ports have
+ the change-suspend feature turned on */
/* per-HC memory pools (could be per-bus, but ...) */
struct dma_pool *qh_pool; /* qh per active urb */
@@ -112,7 +114,6 @@ struct ehci_hcd { /* one per controller */
u32 command;
/* SILICON QUIRKS */
- unsigned is_tdi_rh_tt:1; /* TDI roothub with TT */
unsigned no_selective_suspend:1;
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned big_endian_mmio:1;
@@ -678,7 +679,7 @@ struct ehci_fstn {
* needed (mostly in root hub code).
*/
-#define ehci_is_TDI(e) ((e)->is_tdi_rh_tt)
+#define ehci_is_TDI(e) (ehci_to_hcd(e)->has_tt)
/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index a53db1d4e07a..eb6c06979f3b 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -269,3 +269,14 @@ config USB_TEST
See <http://www.linux-usb.org/usbtest/> for more information,
including sample test device firmware and "how to use it".
+config USB_ISIGHTFW
+ tristate "iSight firmware loading support"
+ depends on USB
+ help
+ This driver loads firmware for USB Apple iSight cameras, allowing
+ them to be driven by the USB video class driver available at
+ http://linux-uvc.berlios.de
+
+ The firmware for this driver must be extracted from the MacOS
+ driver beforehand. Tools for doing so are available at
+ http://bersace03.free.fr
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index b68e6b774f1a..aba091cb5ec0 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_USB_EMI62) += emi62.o
obj-$(CONFIG_USB_FTDI_ELAN) += ftdi-elan.o
obj-$(CONFIG_USB_IDMOUSE) += idmouse.o
obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o
+obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
obj-$(CONFIG_USB_LCD) += usblcd.o
obj-$(CONFIG_USB_LD) += ldusb.o
obj-$(CONFIG_USB_LED) += usbled.o
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
new file mode 100644
index 000000000000..390e04885536
--- /dev/null
+++ b/drivers/usb/misc/isight_firmware.c
@@ -0,0 +1,131 @@
+/*
+ * Driver for loading USB isight firmware
+ *
+ * Copyright (C) 2008 Matthew Garrett <mjg@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
+ *
+ * The USB isight cameras in recent Apples are roughly compatible with the USB
+ * video class specification, and can be driven by uvcvideo. However, they
+ * need firmware to be loaded beforehand. After firmware loading, the device
+ * detaches from the USB bus and reattaches with a new device ID. It can then
+ * be claimed by the uvc driver.
+ *
+ * The firmware is non-free and must be extracted by the user. Tools to do this
+ * are available at http://bersace03.free.fr/ift/
+ *
+ * The isight firmware loading was reverse engineered by Johannes Berg
+ * <johannes@sipsolutions.de>, and this driver is based on code by Ronald
+ * Bultje <rbultje@ronald.bitfreak.net>
+ */
+
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+static struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x05ac, 0x8300)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static int isight_firmware_load(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ int llen, len, req, ret = 0;
+ const struct firmware *firmware;
+ unsigned char *buf;
+ unsigned char data[4];
+ char *ptr;
+
+ if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) {
+ printk(KERN_ERR "Unable to load isight firmware\n");
+ return -ENODEV;
+ }
+
+ ptr = firmware->data;
+
+ if (usb_control_msg
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1,
+ 300) != 1) {
+ printk(KERN_ERR
+ "Failed to initialise isight firmware loader\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ while (1) {
+ memcpy(data, ptr, 4);
+ len = (data[0] << 8 | data[1]);
+ req = (data[2] << 8 | data[3]);
+ ptr += 4;
+
+ if (len == 0x8001)
+ break; /* success */
+ else if (len == 0)
+ continue;
+
+ for (; len > 0; req += 50) {
+ llen = len > 50 ? 50 : len;
+ len -= llen;
+
+ buf = kmalloc(llen, GFP_KERNEL);
+ memcpy(buf, ptr, llen);
+
+ ptr += llen;
+
+ if (usb_control_msg
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, req, 0,
+ buf, llen, 300) != llen) {
+ printk(KERN_ERR
+ "Failed to load isight firmware\n");
+ kfree(buf);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ kfree(buf);
+ }
+ }
+ if (usb_control_msg
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1,
+ 300) != 1) {
+ printk(KERN_ERR "isight firmware loading completion failed\n");
+ ret = -ENODEV;
+ }
+out:
+ release_firmware(firmware);
+ return ret;
+}
+
+static void isight_firmware_disconnect(struct usb_interface *intf)
+{
+}
+
+static struct usb_driver isight_firmware_driver = {
+ .name = "isight_firmware",
+ .probe = isight_firmware_load,
+ .disconnect = isight_firmware_disconnect,
+ .id_table = id_table,
+};
+
+static int __init isight_firmware_init(void)
+{
+ return usb_register(&isight_firmware_driver);
+}
+
+static void __exit isight_firmware_exit(void)
+{
+ usb_deregister(&isight_firmware_driver);
+}
+
+module_init(isight_firmware_init);
+module_exit(isight_firmware_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
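
For reference, the isight.fw image parsed above is a sequence of records: a four-byte header whose first two bytes are a big-endian payload length and whose last two bytes are a big-endian value used as the wValue of the vendor control request, followed by the payload, which the loader writes in chunks of at most 50 bytes while advancing that value by 50 per chunk; a length field of 0x8001 ends the image and a length of 0 marks an empty record. A user-space sketch that only walks those records (and, unlike the driver, bounds-checks against the image size) without issuing any USB traffic:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

int walk_isight_fw(const uint8_t *ptr, size_t size)
{
	const uint8_t *end = ptr + size;

	while (ptr + 4 <= end) {
		unsigned int len = ptr[0] << 8 | ptr[1];	/* payload length */
		unsigned int req = ptr[2] << 8 | ptr[3];	/* wValue to send to */

		ptr += 4;
		if (len == 0x8001)
			return 0;		/* end-of-image marker */
		if (len == 0)
			continue;		/* empty record */

		while (len > 0) {
			unsigned int chunk = len > 50 ? 50 : len;

			if (ptr + chunk > end)
				return -1;	/* truncated image */
			printf("would send %u bytes to wValue 0x%04x\n",
			       chunk, req);
			ptr += chunk;
			len -= chunk;
			req += 50;
		}
	}
	return -1;				/* no terminator found */
}

Calling it on the bytes of a dumped isight.fw lists each chunk the driver would transfer.
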
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3cee6feac174..5234e7a3bd2c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -174,8 +174,270 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
- { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID),
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0100_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0101_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0102_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0103_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0104_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0105_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0106_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0107_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0108_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0109_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0110_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0111_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0112_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0113_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0114_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0115_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0116_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0117_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0118_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0119_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0120_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0121_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0122_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0123_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0124_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0125_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0126_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0127_PID),
.driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0128_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0129_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012C_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0130_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0131_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0132_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0133_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0134_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0135_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0136_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0137_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0138_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0139_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0140_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0141_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0142_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0143_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0144_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0145_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0146_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0147_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0148_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0149_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0150_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0151_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0152_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0153_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0154_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0155_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0156_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0157_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0158_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0159_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0160_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0161_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0162_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0163_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0164_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0165_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0166_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0167_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0168_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0169_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0170_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0171_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0172_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0173_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0174_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0175_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0176_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0177_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0178_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0179_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0180_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0181_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0182_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0183_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0184_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0185_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0186_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0187_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0188_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0189_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0190_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0191_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0192_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0193_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0194_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0195_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0196_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0197_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0198_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0199_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019A_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019B_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019C_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019D_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019E_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019F_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A0_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A1_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A2_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A3_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A4_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A5_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A6_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A7_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A8_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A9_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AA_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AB_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AC_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AD_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AE_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AF_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B0_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B1_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B2_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B3_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B4_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B5_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B6_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B7_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B8_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B9_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BA_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BB_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BC_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BD_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BE_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BF_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C0_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C1_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C2_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C3_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C4_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C5_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C6_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C7_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C8_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C9_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CA_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CB_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CC_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CD_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CE_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CF_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D0_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D1_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D2_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D3_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D4_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D5_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D6_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D7_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D8_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D9_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DA_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DB_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DC_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DD_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DE_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DF_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E0_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E1_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E2_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E3_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E4_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E5_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E6_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E7_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E8_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E9_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EA_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EB_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EC_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01ED_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EE_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EF_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F0_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F1_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F2_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F3_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F4_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F5_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F6_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F7_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F8_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F9_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FA_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FB_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FC_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FD_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FE_PID) },
+ { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FF_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index a72f2c81d664..06e0ecabb3eb 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -114,11 +114,268 @@
#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
/*
- * The following are the values for the Matrix Orbital VK204-25-USB
- * display, which use the FT232RL.
- */
-#define MTXORB_VK_VID 0x1b3d
-#define MTXORB_VK_PID 0x0158
+ * The following are the values for the Matrix Orbital FTDI Range
+ * Anything in this range will use an FT232RL.
+ */
+#define MTXORB_VID 0x1B3D
+#define MTXORB_FTDI_RANGE_0100_PID 0x0100
+#define MTXORB_FTDI_RANGE_0101_PID 0x0101
+#define MTXORB_FTDI_RANGE_0102_PID 0x0102
+#define MTXORB_FTDI_RANGE_0103_PID 0x0103
+#define MTXORB_FTDI_RANGE_0104_PID 0x0104
+#define MTXORB_FTDI_RANGE_0105_PID 0x0105
+#define MTXORB_FTDI_RANGE_0106_PID 0x0106
+#define MTXORB_FTDI_RANGE_0107_PID 0x0107
+#define MTXORB_FTDI_RANGE_0108_PID 0x0108
+#define MTXORB_FTDI_RANGE_0109_PID 0x0109
+#define MTXORB_FTDI_RANGE_010A_PID 0x010A
+#define MTXORB_FTDI_RANGE_010B_PID 0x010B
+#define MTXORB_FTDI_RANGE_010C_PID 0x010C
+#define MTXORB_FTDI_RANGE_010D_PID 0x010D
+#define MTXORB_FTDI_RANGE_010E_PID 0x010E
+#define MTXORB_FTDI_RANGE_010F_PID 0x010F
+#define MTXORB_FTDI_RANGE_0110_PID 0x0110
+#define MTXORB_FTDI_RANGE_0111_PID 0x0111
+#define MTXORB_FTDI_RANGE_0112_PID 0x0112
+#define MTXORB_FTDI_RANGE_0113_PID 0x0113
+#define MTXORB_FTDI_RANGE_0114_PID 0x0114
+#define MTXORB_FTDI_RANGE_0115_PID 0x0115
+#define MTXORB_FTDI_RANGE_0116_PID 0x0116
+#define MTXORB_FTDI_RANGE_0117_PID 0x0117
+#define MTXORB_FTDI_RANGE_0118_PID 0x0118
+#define MTXORB_FTDI_RANGE_0119_PID 0x0119
+#define MTXORB_FTDI_RANGE_011A_PID 0x011A
+#define MTXORB_FTDI_RANGE_011B_PID 0x011B
+#define MTXORB_FTDI_RANGE_011C_PID 0x011C
+#define MTXORB_FTDI_RANGE_011D_PID 0x011D
+#define MTXORB_FTDI_RANGE_011E_PID 0x011E
+#define MTXORB_FTDI_RANGE_011F_PID 0x011F
+#define MTXORB_FTDI_RANGE_0120_PID 0x0120
+#define MTXORB_FTDI_RANGE_0121_PID 0x0121
+#define MTXORB_FTDI_RANGE_0122_PID 0x0122
+#define MTXORB_FTDI_RANGE_0123_PID 0x0123
+#define MTXORB_FTDI_RANGE_0124_PID 0x0124
+#define MTXORB_FTDI_RANGE_0125_PID 0x0125
+#define MTXORB_FTDI_RANGE_0126_PID 0x0126
+#define MTXORB_FTDI_RANGE_0127_PID 0x0127
+#define MTXORB_FTDI_RANGE_0128_PID 0x0128
+#define MTXORB_FTDI_RANGE_0129_PID 0x0129
+#define MTXORB_FTDI_RANGE_012A_PID 0x012A
+#define MTXORB_FTDI_RANGE_012B_PID 0x012B
+#define MTXORB_FTDI_RANGE_012C_PID 0x012C
+#define MTXORB_FTDI_RANGE_012D_PID 0x012D
+#define MTXORB_FTDI_RANGE_012E_PID 0x012E
+#define MTXORB_FTDI_RANGE_012F_PID 0x012F
+#define MTXORB_FTDI_RANGE_0130_PID 0x0130
+#define MTXORB_FTDI_RANGE_0131_PID 0x0131
+#define MTXORB_FTDI_RANGE_0132_PID 0x0132
+#define MTXORB_FTDI_RANGE_0133_PID 0x0133
+#define MTXORB_FTDI_RANGE_0134_PID 0x0134
+#define MTXORB_FTDI_RANGE_0135_PID 0x0135
+#define MTXORB_FTDI_RANGE_0136_PID 0x0136
+#define MTXORB_FTDI_RANGE_0137_PID 0x0137
+#define MTXORB_FTDI_RANGE_0138_PID 0x0138
+#define MTXORB_FTDI_RANGE_0139_PID 0x0139
+#define MTXORB_FTDI_RANGE_013A_PID 0x013A
+#define MTXORB_FTDI_RANGE_013B_PID 0x013B
+#define MTXORB_FTDI_RANGE_013C_PID 0x013C
+#define MTXORB_FTDI_RANGE_013D_PID 0x013D
+#define MTXORB_FTDI_RANGE_013E_PID 0x013E
+#define MTXORB_FTDI_RANGE_013F_PID 0x013F
+#define MTXORB_FTDI_RANGE_0140_PID 0x0140
+#define MTXORB_FTDI_RANGE_0141_PID 0x0141
+#define MTXORB_FTDI_RANGE_0142_PID 0x0142
+#define MTXORB_FTDI_RANGE_0143_PID 0x0143
+#define MTXORB_FTDI_RANGE_0144_PID 0x0144
+#define MTXORB_FTDI_RANGE_0145_PID 0x0145
+#define MTXORB_FTDI_RANGE_0146_PID 0x0146
+#define MTXORB_FTDI_RANGE_0147_PID 0x0147
+#define MTXORB_FTDI_RANGE_0148_PID 0x0148
+#define MTXORB_FTDI_RANGE_0149_PID 0x0149
+#define MTXORB_FTDI_RANGE_014A_PID 0x014A
+#define MTXORB_FTDI_RANGE_014B_PID 0x014B
+#define MTXORB_FTDI_RANGE_014C_PID 0x014C
+#define MTXORB_FTDI_RANGE_014D_PID 0x014D
+#define MTXORB_FTDI_RANGE_014E_PID 0x014E
+#define MTXORB_FTDI_RANGE_014F_PID 0x014F
+#define MTXORB_FTDI_RANGE_0150_PID 0x0150
+#define MTXORB_FTDI_RANGE_0151_PID 0x0151
+#define MTXORB_FTDI_RANGE_0152_PID 0x0152
+#define MTXORB_FTDI_RANGE_0153_PID 0x0153
+#define MTXORB_FTDI_RANGE_0154_PID 0x0154
+#define MTXORB_FTDI_RANGE_0155_PID 0x0155
+#define MTXORB_FTDI_RANGE_0156_PID 0x0156
+#define MTXORB_FTDI_RANGE_0157_PID 0x0157
+#define MTXORB_FTDI_RANGE_0158_PID 0x0158
+#define MTXORB_FTDI_RANGE_0159_PID 0x0159
+#define MTXORB_FTDI_RANGE_015A_PID 0x015A
+#define MTXORB_FTDI_RANGE_015B_PID 0x015B
+#define MTXORB_FTDI_RANGE_015C_PID 0x015C
+#define MTXORB_FTDI_RANGE_015D_PID 0x015D
+#define MTXORB_FTDI_RANGE_015E_PID 0x015E
+#define MTXORB_FTDI_RANGE_015F_PID 0x015F
+#define MTXORB_FTDI_RANGE_0160_PID 0x0160
+#define MTXORB_FTDI_RANGE_0161_PID 0x0161
+#define MTXORB_FTDI_RANGE_0162_PID 0x0162
+#define MTXORB_FTDI_RANGE_0163_PID 0x0163
+#define MTXORB_FTDI_RANGE_0164_PID 0x0164
+#define MTXORB_FTDI_RANGE_0165_PID 0x0165
+#define MTXORB_FTDI_RANGE_0166_PID 0x0166
+#define MTXORB_FTDI_RANGE_0167_PID 0x0167
+#define MTXORB_FTDI_RANGE_0168_PID 0x0168
+#define MTXORB_FTDI_RANGE_0169_PID 0x0169
+#define MTXORB_FTDI_RANGE_016A_PID 0x016A
+#define MTXORB_FTDI_RANGE_016B_PID 0x016B
+#define MTXORB_FTDI_RANGE_016C_PID 0x016C
+#define MTXORB_FTDI_RANGE_016D_PID 0x016D
+#define MTXORB_FTDI_RANGE_016E_PID 0x016E
+#define MTXORB_FTDI_RANGE_016F_PID 0x016F
+#define MTXORB_FTDI_RANGE_0170_PID 0x0170
+#define MTXORB_FTDI_RANGE_0171_PID 0x0171
+#define MTXORB_FTDI_RANGE_0172_PID 0x0172
+#define MTXORB_FTDI_RANGE_0173_PID 0x0173
+#define MTXORB_FTDI_RANGE_0174_PID 0x0174
+#define MTXORB_FTDI_RANGE_0175_PID 0x0175
+#define MTXORB_FTDI_RANGE_0176_PID 0x0176
+#define MTXORB_FTDI_RANGE_0177_PID 0x0177
+#define MTXORB_FTDI_RANGE_0178_PID 0x0178
+#define MTXORB_FTDI_RANGE_0179_PID 0x0179
+#define MTXORB_FTDI_RANGE_017A_PID 0x017A
+#define MTXORB_FTDI_RANGE_017B_PID 0x017B
+#define MTXORB_FTDI_RANGE_017C_PID 0x017C
+#define MTXORB_FTDI_RANGE_017D_PID 0x017D
+#define MTXORB_FTDI_RANGE_017E_PID 0x017E
+#define MTXORB_FTDI_RANGE_017F_PID 0x017F
+#define MTXORB_FTDI_RANGE_0180_PID 0x0180
+#define MTXORB_FTDI_RANGE_0181_PID 0x0181
+#define MTXORB_FTDI_RANGE_0182_PID 0x0182
+#define MTXORB_FTDI_RANGE_0183_PID 0x0183
+#define MTXORB_FTDI_RANGE_0184_PID 0x0184
+#define MTXORB_FTDI_RANGE_0185_PID 0x0185
+#define MTXORB_FTDI_RANGE_0186_PID 0x0186
+#define MTXORB_FTDI_RANGE_0187_PID 0x0187
+#define MTXORB_FTDI_RANGE_0188_PID 0x0188
+#define MTXORB_FTDI_RANGE_0189_PID 0x0189
+#define MTXORB_FTDI_RANGE_018A_PID 0x018A
+#define MTXORB_FTDI_RANGE_018B_PID 0x018B
+#define MTXORB_FTDI_RANGE_018C_PID 0x018C
+#define MTXORB_FTDI_RANGE_018D_PID 0x018D
+#define MTXORB_FTDI_RANGE_018E_PID 0x018E
+#define MTXORB_FTDI_RANGE_018F_PID 0x018F
+#define MTXORB_FTDI_RANGE_0190_PID 0x0190
+#define MTXORB_FTDI_RANGE_0191_PID 0x0191
+#define MTXORB_FTDI_RANGE_0192_PID 0x0192
+#define MTXORB_FTDI_RANGE_0193_PID 0x0193
+#define MTXORB_FTDI_RANGE_0194_PID 0x0194
+#define MTXORB_FTDI_RANGE_0195_PID 0x0195
+#define MTXORB_FTDI_RANGE_0196_PID 0x0196
+#define MTXORB_FTDI_RANGE_0197_PID 0x0197
+#define MTXORB_FTDI_RANGE_0198_PID 0x0198
+#define MTXORB_FTDI_RANGE_0199_PID 0x0199
+#define MTXORB_FTDI_RANGE_019A_PID 0x019A
+#define MTXORB_FTDI_RANGE_019B_PID 0x019B
+#define MTXORB_FTDI_RANGE_019C_PID 0x019C
+#define MTXORB_FTDI_RANGE_019D_PID 0x019D
+#define MTXORB_FTDI_RANGE_019E_PID 0x019E
+#define MTXORB_FTDI_RANGE_019F_PID 0x019F
+#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0
+#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1
+#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2
+#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3
+#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4
+#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5
+#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6
+#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7
+#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8
+#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9
+#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA
+#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB
+#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC
+#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD
+#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE
+#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF
+#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0
+#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1
+#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2
+#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3
+#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4
+#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5
+#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6
+#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7
+#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8
+#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9
+#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA
+#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB
+#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC
+#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD
+#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE
+#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF
+#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0
+#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1
+#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2
+#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3
+#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4
+#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5
+#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6
+#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7
+#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8
+#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9
+#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA
+#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB
+#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC
+#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD
+#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE
+#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF
+#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0
+#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1
+#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2
+#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3
+#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4
+#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5
+#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6
+#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7
+#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8
+#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9
+#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA
+#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB
+#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC
+#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD
+#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE
+#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF
+#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0
+#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1
+#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2
+#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3
+#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4
+#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5
+#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6
+#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7
+#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8
+#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9
+#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA
+#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB
+#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC
+#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED
+#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE
+#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF
+#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0
+#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1
+#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2
+#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3
+#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4
+#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5
+#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6
+#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7
+#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8
+#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9
+#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA
+#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB
+#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC
+#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
+#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
+#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
+
+
/* Interbiometrics USB I/O Board */
/* Developed for Interbiometrics by Rudolf Gugler */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6cecd2c12b1d..43cfde83a93b 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,25 +236,25 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_NETWORK_EX) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_NETWORK) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 234c5eea95a2..103195abd417 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -56,6 +56,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3bdefe020501..cff160abb130 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -14,6 +14,7 @@
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
#define PL2303_PRODUCT_ID_ALDIGA 0x0611
+#define PL2303_PRODUCT_ID_MMX 0x0612
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1b09578cbb10..45fe3663fa7f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -405,7 +405,7 @@ UNUSUAL_DEV( 0x04a5, 0x3010, 0x0100, 0x0100,
UNUSUAL_DEV( 0x04b4, 0x6830, 0x0000, 0x9999,
"Cypress",
"Cypress AT2LP",
- US_SC_CYP_ATACB, US_PR_BULK, NULL,
+ US_SC_CYP_ATACB, US_PR_DEVICE, NULL,
0),
#endif
@@ -1522,7 +1522,7 @@ UNUSUAL_DEV( 0x0fce, 0xe031, 0x0000, 0x0000,
"Sony Ericsson",
"M600i",
US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
+ US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
* Tested on hardware version 1.10.
@@ -1716,10 +1716,12 @@ UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001,
/*
* Patch by Pete Zaitcev <zaitcev@redhat.com>
* Report by Mark Patton. Red Hat bz#208928.
+ * Added support for rev 0x0002 (Motorola ROKR W5)
+ * by Javier Smaldone <javier@smaldone.com.ar>
*/
-UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0001,
+UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002,
"Motorola",
- "RAZR V3i",
+ "RAZR V3i/ROKR W5",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index f85b19625f97..30d09cbbad94 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -221,8 +221,7 @@ geodewdt_probe(struct platform_device *dev)
{
int ret, timer;
- timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
- MFGPT_DOMAIN_WORKING, THIS_MODULE);
+ timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
if (timer == -1) {
printk(KERN_ERR "geodewdt: No timers were available\n");
diff --git a/include/asm-ia64/patch.h b/include/asm-ia64/patch.h
index a71543084fb4..295fe6ab4584 100644
--- a/include/asm-ia64/patch.h
+++ b/include/asm-ia64/patch.h
@@ -21,6 +21,7 @@ extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel
extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
extern void ia64_patch_vtop (unsigned long start, unsigned long end);
extern void ia64_patch_phys_stack_reg(unsigned long val);
+extern void ia64_patch_rse (unsigned long start, unsigned long end);
extern void ia64_patch_gate (void);
#endif /* _ASM_IA64_PATCH_H */
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 4b2a8d40ebc5..15f8dcfe6eee 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -76,7 +76,7 @@
# define KERNEL_STACK_SIZE_ORDER 0
#endif
-#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
+#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
#define KERNEL_STACK_SIZE IA64_STK_OFFSET
diff --git a/include/asm-ia64/sections.h b/include/asm-ia64/sections.h
index dc42a359894f..7286e4a9fe84 100644
--- a/include/asm-ia64/sections.h
+++ b/include/asm-ia64/sections.h
@@ -10,6 +10,7 @@
extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
+extern char __start___rse_patchlist[], __end___rse_patchlist[];
extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[];
extern char __start_gate_section[];
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h
index 0e959e20e9a3..41c547656130 100644
--- a/include/asm-s390/types.h
+++ b/include/asm-s390/types.h
@@ -40,7 +40,13 @@ typedef __signed__ long saddr_t;
#ifndef __ASSEMBLY__
+typedef u64 dma64_addr_t;
+#ifdef __s390x__
+/* DMA addresses come in 32-bit and 64-bit flavours. */
+typedef u64 dma_addr_t;
+#else
typedef u32 dma_addr_t;
+#endif
#ifndef __s390x__
typedef union {
diff --git a/include/linux/device.h b/include/linux/device.h
index 14616e80213c..6a2d04c011bc 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -385,6 +385,9 @@ static inline const char *dev_name(struct device *dev)
return dev->bus_id;
}
+extern int dev_set_name(struct device *dev, const char *name, ...)
+ __attribute__((format(printf, 2, 3)));
+
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
{
diff --git a/include/linux/input.h b/include/linux/input.h
index 28a094fcfe20..e075c4b762fb 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -637,7 +637,9 @@ struct input_absinfo {
#define SW_LID 0x00 /* set = lid shut */
#define SW_TABLET_MODE 0x01 /* set = tablet mode */
#define SW_HEADPHONE_INSERT 0x02 /* set = inserted */
-#define SW_RADIO 0x03 /* set = radio enabled */
+#define SW_RFKILL_ALL 0x03 /* rfkill master switch, type "any"
+ set = radio enabled */
+#define SW_RADIO SW_RFKILL_ALL /* deprecated */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3e05e5474749..ae0be3c62375 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -766,7 +766,6 @@ struct sched_domain {
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
cpumask_t span; /* span of all CPUs in this domain */
- int first_cpu; /* cache of the first cpu in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 4bb7074a2c3a..24f3d2282e11 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -166,7 +166,9 @@ void arch_update_cpu_topology(void);
.busy_idx = 3, \
.idle_idx = 3, \
.flags = SD_LOAD_BALANCE \
- | SD_SERIALIZE, \
+ | SD_BALANCE_NEWIDLE \
+ | SD_WAKE_AFFINE \
+ | SD_SERIALIZE, \
.last_balance = jiffies, \
.balance_interval = 64, \
}
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
index 4d13732e9cf0..6f69968eab24 100644
--- a/include/linux/wm97xx.h
+++ b/include/linux/wm97xx.h
@@ -100,6 +100,7 @@
#define WM9713_ADCSEL_Y 0x0004 /* Y measurement */
#define WM9713_ADCSEL_PRES 0x0008 /* Pressure measurement */
#define WM9713_COO 0x0001 /* enable coordinate mode */
+#define WM9713_45W 0x1000 /* set for 5 wire panel */
#define WM9713_PDEN 0x0800 /* measure only when pen down */
#define WM9713_ADCSEL_MASK 0x00fe /* ADC selection mask */
#define WM9713_WAIT 0x0200 /* coordinate wait */
diff --git a/kernel/sched.c b/kernel/sched.c
index cfa222a91539..bfb8ad8ed171 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
static inline int rt_policy(int policy)
{
- if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+ if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
return 1;
return 0;
}
@@ -398,43 +398,6 @@ struct cfs_rq {
*/
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
-
-#ifdef CONFIG_SMP
- unsigned long task_weight;
- unsigned long shares;
- /*
- * We need space to build a sched_domain wide view of the full task
- * group tree, in order to avoid depending on dynamic memory allocation
- * during the load balancing we place this in the per cpu task group
- * hierarchy. This limits the load balancing to one instance per cpu,
- * but more should not be needed anyway.
- */
- struct aggregate_struct {
- /*
- * load = weight(cpus) * f(tg)
- *
- * Where f(tg) is the recursive weight fraction assigned to
- * this group.
- */
- unsigned long load;
-
- /*
- * part of the group weight distributed to this span.
- */
- unsigned long shares;
-
- /*
- * The sum of all runqueue weights within this span.
- */
- unsigned long rq_weight;
-
- /*
- * Weight contributed by tasks; this is the part we can
- * influence by moving tasks around.
- */
- unsigned long task_weight;
- } aggregate;
-#endif
#endif
};
@@ -1368,9 +1331,6 @@ static void __resched_task(struct task_struct *p, int tif_bit)
*/
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
-/*
- * delta *= weight / lw
- */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
struct load_weight *lw)
@@ -1393,6 +1353,12 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
+static inline unsigned long
+calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
+{
+ return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
+}
+
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
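The new calc_delta_fair() helper is just calc_delta_mine() with NICE_0_LOAD as the weight, i.e. delta * NICE_0_LOAD / lw->weight: runtime charged against a heavier entity advances its vruntime more slowly. A toy stand-alone version with illustrative weights (NICE_0_LOAD is 1024 in this kernel):

#include <stdio.h>

#define NICE_0_LOAD 1024UL

/* Simplified stand-in for calc_delta_mine(): delta * weight / lw_weight,
 * without the fixed-point inverse-weight machinery of the real function. */
static unsigned long calc_delta(unsigned long delta, unsigned long weight,
                                unsigned long lw_weight)
{
    return (unsigned long)((unsigned long long)delta * weight / lw_weight);
}

int main(void)
{
    unsigned long delta_exec = 1000000;    /* 1 ms of wall-clock runtime */

    /* calc_delta_fair(delta, &se->load) == delta * NICE_0_LOAD / se weight */
    printf("weight 1024 (nice 0): vruntime += %lu\n",
           calc_delta(delta_exec, NICE_0_LOAD, 1024));
    printf("weight 2048 (heavy):  vruntime += %lu\n",
           calc_delta(delta_exec, NICE_0_LOAD, 2048));
    printf("weight  512 (light):  vruntime += %lu\n",
           calc_delta(delta_exec, NICE_0_LOAD, 512));
    return 0;
}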
@@ -1505,326 +1471,6 @@ static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long cpu_avg_load_per_task(int cpu);
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/*
- * Group load balancing.
- *
- * We calculate a few balance domain wide aggregate numbers; load and weight.
- * Given the pictures below, and assuming each item has equal weight:
- *
- * root 1 - thread
- * / | \ A - group
- * A 1 B
- * /|\ / \
- * C 2 D 3 4
- * | |
- * 5 6
- *
- * load:
- * A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd,
- * which equals 1/9-th of the total load.
- *
- * shares:
- * The weight of this group on the selected cpus.
- *
- * rq_weight:
- * Direct sum of all the cpu's their rq weight, e.g. A would get 3 while
- * B would get 2.
- *
- * task_weight:
- * Part of the rq_weight contributed by tasks; all groups except B would
- * get 1, B gets 2.
- */
-
-static inline struct aggregate_struct *
-aggregate(struct task_group *tg, struct sched_domain *sd)
-{
- return &tg->cfs_rq[sd->first_cpu]->aggregate;
-}
-
-typedef void (*aggregate_func)(struct task_group *, struct sched_domain *);
-
-/*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
- */
-static
-void aggregate_walk_tree(aggregate_func down, aggregate_func up,
- struct sched_domain *sd)
-{
- struct task_group *parent, *child;
-
- rcu_read_lock();
- parent = &root_task_group;
-down:
- (*down)(parent, sd);
- list_for_each_entry_rcu(child, &parent->children, siblings) {
- parent = child;
- goto down;
-
-up:
- continue;
- }
- (*up)(parent, sd);
-
- child = parent;
- parent = parent->parent;
- if (parent)
- goto up;
- rcu_read_unlock();
-}
-
-/*
- * Calculate the aggregate runqueue weight.
- */
-static
-void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
-{
- unsigned long rq_weight = 0;
- unsigned long task_weight = 0;
- int i;
-
- for_each_cpu_mask(i, sd->span) {
- rq_weight += tg->cfs_rq[i]->load.weight;
- task_weight += tg->cfs_rq[i]->task_weight;
- }
-
- aggregate(tg, sd)->rq_weight = rq_weight;
- aggregate(tg, sd)->task_weight = task_weight;
-}
-
-/*
- * Compute the weight of this group on the given cpus.
- */
-static
-void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
-{
- unsigned long shares = 0;
- int i;
-
- for_each_cpu_mask(i, sd->span)
- shares += tg->cfs_rq[i]->shares;
-
- if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
- shares = tg->shares;
-
- aggregate(tg, sd)->shares = shares;
-}
-
-/*
- * Compute the load fraction assigned to this group, relies on the aggregate
- * weight and this group's parent's load, i.e. top-down.
- */
-static
-void aggregate_group_load(struct task_group *tg, struct sched_domain *sd)
-{
- unsigned long load;
-
- if (!tg->parent) {
- int i;
-
- load = 0;
- for_each_cpu_mask(i, sd->span)
- load += cpu_rq(i)->load.weight;
-
- } else {
- load = aggregate(tg->parent, sd)->load;
-
- /*
- * shares is our weight in the parent's rq so
- * shares/parent->rq_weight gives our fraction of the load
- */
- load *= aggregate(tg, sd)->shares;
- load /= aggregate(tg->parent, sd)->rq_weight + 1;
- }
-
- aggregate(tg, sd)->load = load;
-}
-
-static void __set_se_shares(struct sched_entity *se, unsigned long shares);
-
-/*
- * Calculate and set the cpu's group shares.
- */
-static void
-__update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
- int tcpu)
-{
- int boost = 0;
- unsigned long shares;
- unsigned long rq_weight;
-
- if (!tg->se[tcpu])
- return;
-
- rq_weight = tg->cfs_rq[tcpu]->load.weight;
-
- /*
- * If there are currently no tasks on the cpu pretend there is one of
- * average load so that when a new task gets to run here it will not
- * get delayed by group starvation.
- */
- if (!rq_weight) {
- boost = 1;
- rq_weight = NICE_0_LOAD;
- }
-
- /*
- * \Sum shares * rq_weight
- * shares = -----------------------
- * \Sum rq_weight
- *
- */
- shares = aggregate(tg, sd)->shares * rq_weight;
- shares /= aggregate(tg, sd)->rq_weight + 1;
-
- /*
- * record the actual number of shares, not the boosted amount.
- */
- tg->cfs_rq[tcpu]->shares = boost ? 0 : shares;
-
- if (shares < MIN_SHARES)
- shares = MIN_SHARES;
- else if (shares > MAX_SHARES)
- shares = MAX_SHARES;
-
- __set_se_shares(tg->se[tcpu], shares);
-}
-
-/*
- * Re-adjust the weights on the cpu the task came from and on the cpu the
- * task went to.
- */
-static void
-__move_group_shares(struct task_group *tg, struct sched_domain *sd,
- int scpu, int dcpu)
-{
- unsigned long shares;
-
- shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
-
- __update_group_shares_cpu(tg, sd, scpu);
- __update_group_shares_cpu(tg, sd, dcpu);
-
- /*
- * ensure we never loose shares due to rounding errors in the
- * above redistribution.
- */
- shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
- if (shares)
- tg->cfs_rq[dcpu]->shares += shares;
-}
-
-/*
- * Because changing a group's shares changes the weight of the super-group
- * we need to walk up the tree and change all shares until we hit the root.
- */
-static void
-move_group_shares(struct task_group *tg, struct sched_domain *sd,
- int scpu, int dcpu)
-{
- while (tg) {
- __move_group_shares(tg, sd, scpu, dcpu);
- tg = tg->parent;
- }
-}
-
-static
-void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd)
-{
- unsigned long shares = aggregate(tg, sd)->shares;
- int i;
-
- for_each_cpu_mask(i, sd->span) {
- struct rq *rq = cpu_rq(i);
- unsigned long flags;
-
- spin_lock_irqsave(&rq->lock, flags);
- __update_group_shares_cpu(tg, sd, i);
- spin_unlock_irqrestore(&rq->lock, flags);
- }
-
- aggregate_group_shares(tg, sd);
-
- /*
- * ensure we never loose shares due to rounding errors in the
- * above redistribution.
- */
- shares -= aggregate(tg, sd)->shares;
- if (shares) {
- tg->cfs_rq[sd->first_cpu]->shares += shares;
- aggregate(tg, sd)->shares += shares;
- }
-}
-
-/*
- * Calculate the accumulative weight and recursive load of each task group
- * while walking down the tree.
- */
-static
-void aggregate_get_down(struct task_group *tg, struct sched_domain *sd)
-{
- aggregate_group_weight(tg, sd);
- aggregate_group_shares(tg, sd);
- aggregate_group_load(tg, sd);
-}
-
-/*
- * Rebalance the cpu shares while walking back up the tree.
- */
-static
-void aggregate_get_up(struct task_group *tg, struct sched_domain *sd)
-{
- aggregate_group_set_shares(tg, sd);
-}
-
-static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
-
-static void __init init_aggregate(void)
-{
- int i;
-
- for_each_possible_cpu(i)
- spin_lock_init(&per_cpu(aggregate_lock, i));
-}
-
-static int get_aggregate(struct sched_domain *sd)
-{
- if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu)))
- return 0;
-
- aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd);
- return 1;
-}
-
-static void put_aggregate(struct sched_domain *sd)
-{
- spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu));
-}
-
-static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
-{
- cfs_rq->shares = shares;
-}
-
-#else
-
-static inline void init_aggregate(void)
-{
-}
-
-static inline int get_aggregate(struct sched_domain *sd)
-{
- return 0;
-}
-
-static inline void put_aggregate(struct sched_domain *sd)
-{
-}
-#endif
-
#else /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1845,14 +1491,26 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
#define sched_class_highest (&rt_sched_class)
-static void inc_nr_running(struct rq *rq)
+static inline void inc_load(struct rq *rq, const struct task_struct *p)
+{
+ update_load_add(&rq->load, p->se.load.weight);
+}
+
+static inline void dec_load(struct rq *rq, const struct task_struct *p)
+{
+ update_load_sub(&rq->load, p->se.load.weight);
+}
+
+static void inc_nr_running(struct task_struct *p, struct rq *rq)
{
rq->nr_running++;
+ inc_load(rq, p);
}
-static void dec_nr_running(struct rq *rq)
+static void dec_nr_running(struct task_struct *p, struct rq *rq)
{
rq->nr_running--;
+ dec_load(rq, p);
}
static void set_load_weight(struct task_struct *p)
@@ -1944,7 +1602,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
rq->nr_uninterruptible--;
enqueue_task(rq, p, wakeup);
- inc_nr_running(rq);
+ inc_nr_running(p, rq);
}
/*
@@ -1956,7 +1614,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
rq->nr_uninterruptible++;
dequeue_task(rq, p, sleep);
- dec_nr_running(rq);
+ dec_nr_running(p, rq);
}
/**
@@ -2609,7 +2267,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
* management (if any):
*/
p->sched_class->task_new(rq, p);
- inc_nr_running(rq);
+ inc_nr_running(p, rq);
}
check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
@@ -3600,12 +3258,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
unsigned long imbalance;
struct rq *busiest;
unsigned long flags;
- int unlock_aggregate;
cpus_setall(*cpus);
- unlock_aggregate = get_aggregate(sd);
-
/*
* When power savings policy is enabled for the parent domain, idle
* sibling can pick up load irrespective of busy siblings. In this case,
@@ -3721,9 +3376,8 @@ redo:
if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- ld_moved = -1;
-
- goto out;
+ return -1;
+ return ld_moved;
out_balanced:
schedstat_inc(sd, lb_balanced[idle]);
@@ -3738,13 +3392,8 @@ out_one_pinned:
if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- ld_moved = -1;
- else
- ld_moved = 0;
-out:
- if (unlock_aggregate)
- put_aggregate(sd);
- return ld_moved;
+ return -1;
+ return 0;
}
/*
@@ -4430,7 +4079,7 @@ static inline void schedule_debug(struct task_struct *prev)
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
- if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+ if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
__schedule_bug(prev);
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -4931,8 +4580,10 @@ void set_user_nice(struct task_struct *p, long nice)
goto out_unlock;
}
on_rq = p->se.on_rq;
- if (on_rq)
+ if (on_rq) {
dequeue_task(rq, p, 0);
+ dec_load(rq, p);
+ }
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p);
@@ -4942,6 +4593,7 @@ void set_user_nice(struct task_struct *p, long nice)
if (on_rq) {
enqueue_task(rq, p, 0);
+ inc_load(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
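set_user_nice() now pairs dequeue_task()/enqueue_task() with dec_load()/inc_load() because rq->load has to lose the task's old weight before set_load_weight() changes it, and gain the new weight afterwards. A small worked example with weights from the nice-to-weight table (1024 for nice 0, 335 for nice +5), purely illustrative:

#include <stdio.h>

int main(void)
{
    unsigned long rq_load = 3072;       /* illustrative queue load */
    unsigned long old_weight = 1024;    /* task at nice 0 */
    unsigned long new_weight = 335;     /* task after renice to +5 */

    rq_load -= old_weight;              /* dec_load() before the reweight */
    /* set_load_weight(p) updates p->se.load here */
    rq_load += new_weight;              /* inc_load() after the reweight */

    printf("rq load after renice: %lu\n", rq_load);
    return 0;
}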
@@ -7316,7 +6968,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr);
sd->span = *cpu_map;
- sd->first_cpu = first_cpu(sd->span);
cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
p = sd;
sd_allnodes = 1;
@@ -7327,7 +6978,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
SD_INIT(sd, NODE);
set_domain_attribute(sd, attr);
sched_domain_node_span(cpu_to_node(i), &sd->span);
- sd->first_cpu = first_cpu(sd->span);
sd->parent = p;
if (p)
p->child = sd;
@@ -7339,7 +6989,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
SD_INIT(sd, CPU);
set_domain_attribute(sd, attr);
sd->span = *nodemask;
- sd->first_cpu = first_cpu(sd->span);
sd->parent = p;
if (p)
p->child = sd;
@@ -7351,7 +7000,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
SD_INIT(sd, MC);
set_domain_attribute(sd, attr);
sd->span = cpu_coregroup_map(i);
- sd->first_cpu = first_cpu(sd->span);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
@@ -7364,7 +7012,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
SD_INIT(sd, SIBLING);
set_domain_attribute(sd, attr);
sd->span = per_cpu(cpu_sibling_map, i);
- sd->first_cpu = first_cpu(sd->span);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
@@ -7568,8 +7215,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
static cpumask_t *doms_cur; /* current sched domains */
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
-static struct sched_domain_attr *dattr_cur; /* attribues of custom domains
- in 'doms_cur' */
+static struct sched_domain_attr *dattr_cur;
+ /* attribues of custom domains in 'doms_cur' */
/*
* Special case: If a kmalloc of a doms_cur partition (array of
@@ -8034,7 +7681,6 @@ void __init sched_init(void)
}
#ifdef CONFIG_SMP
- init_aggregate();
init_defrootdomain();
#endif
@@ -8599,11 +8245,14 @@ void sched_move_task(struct task_struct *tsk)
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void __set_se_shares(struct sched_entity *se, unsigned long shares)
+static void set_se_shares(struct sched_entity *se, unsigned long shares)
{
struct cfs_rq *cfs_rq = se->cfs_rq;
+ struct rq *rq = cfs_rq->rq;
int on_rq;
+ spin_lock_irq(&rq->lock);
+
on_rq = se->on_rq;
if (on_rq)
dequeue_entity(cfs_rq, se, 0);
@@ -8613,17 +8262,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
if (on_rq)
enqueue_entity(cfs_rq, se, 0);
-}
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
-{
- struct cfs_rq *cfs_rq = se->cfs_rq;
- struct rq *rq = cfs_rq->rq;
- unsigned long flags;
-
- spin_lock_irqsave(&rq->lock, flags);
- __set_se_shares(se, shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ spin_unlock_irq(&rq->lock);
}
static DEFINE_MUTEX(shares_mutex);
@@ -8662,13 +8302,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
* w/o tripping rebalance_share or load_balance_fair.
*/
tg->shares = shares;
- for_each_possible_cpu(i) {
- /*
- * force a rebalance
- */
- cfs_rq_set_shares(tg->cfs_rq[i], 0);
+ for_each_possible_cpu(i)
set_se_shares(tg->se[i], shares);
- }
/*
* Enable load balance activity on this group, by inserting it back on
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 9c597e37f7de..ce05271219ab 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -59,22 +59,26 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
return &per_cpu(sched_clock_data, cpu);
}
+static __read_mostly int sched_clock_running;
+
void sched_clock_init(void)
{
u64 ktime_now = ktime_to_ns(ktime_get());
- u64 now = 0;
+ unsigned long now_jiffies = jiffies;
int cpu;
for_each_possible_cpu(cpu) {
struct sched_clock_data *scd = cpu_sdc(cpu);
scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
- scd->prev_jiffies = jiffies;
- scd->prev_raw = now;
- scd->tick_raw = now;
+ scd->prev_jiffies = now_jiffies;
+ scd->prev_raw = 0;
+ scd->tick_raw = 0;
scd->tick_gtod = ktime_now;
scd->clock = ktime_now;
}
+
+ sched_clock_running = 1;
}
/*
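Both sched_clock_cpu() and sched_clock_tick() now bail out until sched_clock_init() has flagged the per-cpu data as initialised. A minimal sketch of that init-guard pattern, assuming a simplified single clock:

#include <stdio.h>

static int clock_running;            /* plays the role of sched_clock_running */
static unsigned long long base;

static void clock_init(unsigned long long now)
{
    base = now;
    clock_running = 1;
}

static unsigned long long clock_read(unsigned long long raw)
{
    if (!clock_running)               /* too early: no state to work with */
        return 0;
    return raw - base;
}

int main(void)
{
    printf("before init: %llu\n", clock_read(12345));
    clock_init(10000);
    printf("after init:  %llu\n", clock_read(12345));
    return 0;
}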
@@ -136,6 +140,9 @@ u64 sched_clock_cpu(int cpu)
struct sched_clock_data *scd = cpu_sdc(cpu);
u64 now, clock;
+ if (unlikely(!sched_clock_running))
+ return 0ull;
+
WARN_ON_ONCE(!irqs_disabled());
now = sched_clock();
@@ -174,6 +181,9 @@ void sched_clock_tick(void)
struct sched_clock_data *scd = this_scd();
u64 now, now_gtod;
+ if (unlikely(!sched_clock_running))
+ return;
+
WARN_ON_ONCE(!irqs_disabled());
now = sched_clock();
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5f06118fbc31..8bb713040ac9 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -167,11 +167,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#endif
SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
cfs_rq->nr_spread_over);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#ifdef CONFIG_SMP
- SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
-#endif
-#endif
}
static void print_cpu(struct seq_file *m, int cpu)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e24ecd39c4b8..08ae848b71d4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -334,34 +334,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
#endif
/*
- * delta *= w / rw
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- delta = calc_delta_mine(delta,
- se->load.weight, &cfs_rq_of(se)->load);
- }
-
- return delta;
-}
-
-/*
- * delta *= rw / w
- */
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- delta = calc_delta_mine(delta,
- cfs_rq_of(se)->load.weight, &se->load);
- }
-
- return delta;
-}
-
-/*
* The idea is to set a period in which each task runs once.
*
* When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
@@ -390,54 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
*/
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
+ u64 slice = __sched_period(cfs_rq->nr_running);
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ slice *= se->load.weight;
+ do_div(slice, cfs_rq->load.weight);
+ }
+
+
+ return slice;
}
/*
* We calculate the vruntime slice of a to be inserted task
*
- * vs = s*rw/w = p
+ * vs = s/w = p/rw
*/
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
unsigned long nr_running = cfs_rq->nr_running;
+ unsigned long weight;
+ u64 vslice;
if (!se->on_rq)
nr_running++;
- return __sched_period(nr_running);
-}
-
-/*
- * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
- * that it favours >=0 over <0.
- *
- * -20 |
- * |
- * 0 --------+-------
- * .'
- * 19 .'
- *
- */
-static unsigned long
-calc_delta_asym(unsigned long delta, struct sched_entity *se)
-{
- struct load_weight lw = {
- .weight = NICE_0_LOAD,
- .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
- };
+ vslice = __sched_period(nr_running);
for_each_sched_entity(se) {
- struct load_weight *se_lw = &se->load;
+ cfs_rq = cfs_rq_of(se);
- if (se->load.weight < NICE_0_LOAD)
- se_lw = &lw;
+ weight = cfs_rq->load.weight;
+ if (!se->on_rq)
+ weight += se->load.weight;
- delta = calc_delta_mine(delta,
- cfs_rq_of(se)->load.weight, se_lw);
+ vslice *= NICE_0_LOAD;
+ do_div(vslice, weight);
}
- return delta;
+ return vslice;
}
/*
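The restored sched_slice() walks the entity hierarchy and scales the period by weight / queue-weight at every level, while sched_vslice_add() scales it by NICE_0_LOAD / queue-weight. A single-level toy version with illustrative weights; the heavier task gets two thirds of the period in wall-clock time, yet both accrue the same vruntime over their slices:

#include <stdio.h>

int main(void)
{
    unsigned long long period = 20000000ULL;   /* 20 ms period, in ns */
    unsigned long weight[2] = { 1024, 2048 };  /* nice-0 task and a heavier one */
    unsigned long total = weight[0] + weight[1];

    for (int i = 0; i < 2; i++) {
        unsigned long long slice = period * weight[i] / total;
        printf("task %d: wall-clock slice = %llu ns\n", i, slice);
    }
    return 0;
}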
@@ -454,7 +419,11 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
- delta_exec_weighted = calc_delta_fair(delta_exec, curr);
+ delta_exec_weighted = delta_exec;
+ if (unlikely(curr->load.weight != NICE_0_LOAD)) {
+ delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
+ &curr->load);
+ }
curr->vruntime += delta_exec_weighted;
}
@@ -541,27 +510,10 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
* Scheduling class queueing methods:
*/
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void
-add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
-{
- cfs_rq->task_weight += weight;
-}
-#else
-static inline void
-add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
-{
-}
-#endif
-
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- inc_cpu_load(rq_of(cfs_rq), se->load.weight);
- if (entity_is_task(se))
- add_cfs_task_weight(cfs_rq, se->load.weight);
cfs_rq->nr_running++;
se->on_rq = 1;
list_add(&se->group_node, &cfs_rq->tasks);
@@ -571,10 +523,6 @@ static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- dec_cpu_load(rq_of(cfs_rq), se->load.weight);
- if (entity_is_task(se))
- add_cfs_task_weight(cfs_rq, -se->load.weight);
cfs_rq->nr_running--;
se->on_rq = 0;
list_del_init(&se->group_node);
@@ -661,17 +609,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) {
/* sleeps upto a single latency don't count. */
- if (sched_feat(NEW_FAIR_SLEEPERS)) {
- unsigned long thresh = sysctl_sched_latency;
-
- /*
- * convert the sleeper threshold into virtual time
- */
- if (sched_feat(NORMALIZED_SLEEPER))
- thresh = calc_delta_fair(thresh, se);
-
- vruntime -= thresh;
- }
+ if (sched_feat(NEW_FAIR_SLEEPERS))
+ vruntime -= sysctl_sched_latency;
/* ensure we never gain time by being placed backwards. */
vruntime = max_vruntime(se->vruntime, vruntime);
@@ -1057,16 +996,27 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
struct task_struct *curr = this_rq->curr;
unsigned long tl = this_load;
unsigned long tl_per_task;
+ int balanced;
- if (!(this_sd->flags & SD_WAKE_AFFINE))
+ if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
return 0;
/*
+ * If sync wakeup then subtract the (maximum possible)
+ * effect of the currently running task from the load
+ * of the current CPU:
+ */
+ if (sync)
+ tl -= current->se.load.weight;
+
+ balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+
+ /*
* If the currently running task will sleep within
* a reasonable amount of time then attract this newly
* woken task:
*/
- if (sync && curr->sched_class == &fair_sched_class) {
+ if (sync && balanced && curr->sched_class == &fair_sched_class) {
if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
p->se.avg_overlap < sysctl_sched_migration_cost)
return 1;
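The affine-wakeup decision is now precomputed as balanced = 100*(tl + p->se.load.weight) <= imbalance*load, after optionally subtracting the sync waker's weight from tl. Worked through with illustrative numbers (in the real code, imbalance is derived from the domain's imbalance_pct):

#include <stdio.h>

int main(void)
{
    unsigned long load = 2048;        /* load on the task's previous CPU */
    unsigned long tl = 2048;          /* load on this (waking) CPU */
    unsigned long p_weight = 1024;    /* weight of the task being woken */
    unsigned long curr_weight = 1024; /* weight of the current (sync) waker */
    unsigned int imbalance = 112;     /* illustrative imbalance threshold */
    int sync = 1;

    if (sync)
        tl -= curr_weight;            /* the waker is about to sleep */

    int balanced = 100 * (tl + p_weight) <= imbalance * load;
    printf("balanced = %d\n", balanced);   /* 204800 <= 229376 -> 1 */
    return 0;
}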
@@ -1075,16 +1025,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
schedstat_inc(p, se.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);
- /*
- * If sync wakeup then subtract the (maximum possible)
- * effect of the currently running task from the load
- * of the current CPU:
- */
- if (sync)
- tl -= current->se.load.weight;
-
if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
- 100*(tl + p->se.load.weight) <= imbalance*load) {
+ balanced) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
@@ -1169,10 +1111,11 @@ static unsigned long wakeup_gran(struct sched_entity *se)
unsigned long gran = sysctl_sched_wakeup_granularity;
/*
- * More easily preempt - nice tasks, while not making it harder for
- * + nice tasks.
+ * More easily preempt - nice tasks, while not making
+ * it harder for + nice tasks.
*/
- gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+ if (unlikely(se->load.weight > NICE_0_LOAD))
+ gran = calc_delta_fair(gran, &se->load);
return gran;
}
@@ -1366,90 +1309,75 @@ static struct task_struct *load_balance_next_fair(void *arg)
return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}
-static unsigned long
-__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
- unsigned long max_load_move, struct sched_domain *sd,
- enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
- struct cfs_rq *cfs_rq)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
- struct rq_iterator cfs_rq_iterator;
+ struct sched_entity *curr;
+ struct task_struct *p;
- cfs_rq_iterator.start = load_balance_start_fair;
- cfs_rq_iterator.next = load_balance_next_fair;
- cfs_rq_iterator.arg = cfs_rq;
+ if (!cfs_rq->nr_running || !first_fair(cfs_rq))
+ return MAX_PRIO;
+
+ curr = cfs_rq->curr;
+ if (!curr)
+ curr = __pick_next_entity(cfs_rq);
+
+ p = task_of(curr);
- return balance_tasks(this_rq, this_cpu, busiest,
- max_load_move, sd, idle, all_pinned,
- this_best_prio, &cfs_rq_iterator);
+ return p->prio;
}
+#endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
+ struct cfs_rq *busy_cfs_rq;
long rem_load_move = max_load_move;
- int busiest_cpu = cpu_of(busiest);
- struct task_group *tg;
-
- rcu_read_lock();
- list_for_each_entry(tg, &task_groups, list) {
- long imbalance;
- unsigned long this_weight, busiest_weight;
- long rem_load, max_load, moved_load;
-
- /*
- * empty group
- */
- if (!aggregate(tg, sd)->task_weight)
- continue;
-
- rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
- rem_load /= aggregate(tg, sd)->load + 1;
-
- this_weight = tg->cfs_rq[this_cpu]->task_weight;
- busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
+ struct rq_iterator cfs_rq_iterator;
- imbalance = (busiest_weight - this_weight) / 2;
+ cfs_rq_iterator.start = load_balance_start_fair;
+ cfs_rq_iterator.next = load_balance_next_fair;
- if (imbalance < 0)
- imbalance = busiest_weight;
+ for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct cfs_rq *this_cfs_rq;
+ long imbalance;
+ unsigned long maxload;
- max_load = max(rem_load, imbalance);
- moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
- max_load, sd, idle, all_pinned, this_best_prio,
- tg->cfs_rq[busiest_cpu]);
+ this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
- if (!moved_load)
+ imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
+ /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
+ if (imbalance <= 0)
continue;
- move_group_shares(tg, sd, busiest_cpu, this_cpu);
+ /* Don't pull more than imbalance/2 */
+ imbalance /= 2;
+ maxload = min(rem_load_move, imbalance);
- moved_load *= aggregate(tg, sd)->load;
- moved_load /= aggregate(tg, sd)->rq_weight + 1;
+ *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+#else
+# define maxload rem_load_move
+#endif
+ /*
+ * pass busy_cfs_rq argument into
+ * load_balance_[start|next]_fair iterators
+ */
+ cfs_rq_iterator.arg = busy_cfs_rq;
+ rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+ maxload, sd, idle, all_pinned,
+ this_best_prio,
+ &cfs_rq_iterator);
- rem_load_move -= moved_load;
- if (rem_load_move < 0)
+ if (rem_load_move <= 0)
break;
}
- rcu_read_unlock();
return max_load_move - rem_load_move;
}
-#else
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
- unsigned long max_load_move,
- struct sched_domain *sd, enum cpu_idle_type idle,
- int *all_pinned, int *this_best_prio)
-{
- return __load_balance_fair(this_rq, this_cpu, busiest,
- max_load_move, sd, idle, all_pinned,
- this_best_prio, &busiest->cfs);
-}
-#endif
static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 060e87b0cb1c..3432d573205d 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -513,8 +513,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
*/
for_each_sched_rt_entity(rt_se)
enqueue_rt_entity(rt_se);
-
- inc_cpu_load(rq, p->se.load.weight);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -534,8 +532,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
if (rt_rq && rt_rq->rt_nr_running)
enqueue_rt_entity(rt_se);
}
-
- dec_cpu_load(rq, p->se.load.weight);
}
/*
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 5bae2e0c3ff2..a38878e0e49d 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -67,6 +67,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
preempt_enable();
#endif
}
+ kfree(mask_str);
return 0;
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index eb62558e9b09..0c2c93735e93 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -423,8 +423,8 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
rfcomm_dlc_lock(d);
d->state = BT_CLOSED;
- rfcomm_dlc_unlock(d);
d->state_change(d, err);
+ rfcomm_dlc_unlock(d);
skb_queue_purge(&d->tx_queue);
rfcomm_dlc_unlink(d);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cd61dea2eea1..f813077234b7 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -193,22 +193,17 @@ static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
/*
* Update Window Counter using the algorithm from [RFC 4342, 8.1].
- * The algorithm is not applicable if RTT < 4 microseconds.
+ * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt().
*/
static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx,
ktime_t now)
{
- u32 quarter_rtts;
-
- if (unlikely(hctx->ccid3hctx_rtt < 4)) /* avoid divide-by-zero */
- return;
-
- quarter_rtts = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count);
- quarter_rtts /= hctx->ccid3hctx_rtt / 4;
+ u32 delta = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count),
+ quarter_rtts = (4 * delta) / hctx->ccid3hctx_rtt;
if (quarter_rtts > 0) {
hctx->ccid3hctx_t_last_win_count = now;
- hctx->ccid3hctx_last_win_count += min_t(u32, quarter_rtts, 5);
+ hctx->ccid3hctx_last_win_count += min(quarter_rtts, 5U);
hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */
}
}
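The rewritten helper counts elapsed quarter-RTTs directly as (4 * delta) / rtt, relying on dccp_sample_rtt() guaranteeing a non-zero RTT, and still caps the window-counter increment at 5 modulo 16. Worked through with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned int rtt_us = 100000;          /* 100 ms RTT */
    unsigned int delta_us = 180000;        /* time since the last update */
    unsigned int last_win_count = 14;

    unsigned int quarter_rtts = (4 * delta_us) / rtt_us;     /* = 7 */
    if (quarter_rtts > 0) {
        unsigned int inc = quarter_rtts < 5 ? quarter_rtts : 5;
        last_win_count = (last_win_count + inc) & 0xF;        /* mod 16 */
    }
    printf("window counter -> %u\n", last_win_count);         /* 14 + 5 mod 16 = 3 */
    return 0;
}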
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b348dd70c685..c22a3780c14e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -739,8 +739,8 @@ int dccp_invalid_packet(struct sk_buff *skb)
* If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
* has short sequence numbers), drop packet and return
*/
- if (dh->dccph_type >= DCCP_PKT_DATA &&
- dh->dccph_type <= DCCP_PKT_DATAACK && dh->dccph_x == 0) {
+ if ((dh->dccph_type < DCCP_PKT_DATA ||
+ dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0) {
DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
dccp_packet_name(dh->dccph_type));
return 1;
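The corrected check drops short-sequence-number packets only when the type is outside the Data..DataAck range, matching the rule quoted in the comment (the old condition had the range test inverted). A small truth-table sketch, with a toy enum that mirrors the assumption that Data, Ack and DataAck are consecutive values:

#include <stdio.h>

enum { PKT_REQUEST, PKT_RESPONSE, PKT_DATA, PKT_ACK, PKT_DATAACK, PKT_CLOSE };

static int drop_short_seqno(int type, int x)
{
    /* drop if the type is NOT Data/Ack/DataAck yet uses short seqnos */
    return (type < PKT_DATA || type > PKT_DATAACK) && x == 0;
}

int main(void)
{
    printf("DataAck, X=0 -> drop? %d\n", drop_short_seqno(PKT_DATAACK, 0));  /* 0 */
    printf("Close,   X=0 -> drop? %d\n", drop_short_seqno(PKT_CLOSE, 0));    /* 1 */
    printf("Close,   X=1 -> drop? %d\n", drop_short_seqno(PKT_CLOSE, 1));    /* 0 */
    return 0;
}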
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index e2ddde755019..008de1fc42ca 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -286,12 +286,14 @@ void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
*
* Sends received pdus to the sap state machine.
*/
-static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb)
+static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
+ struct sock *sk)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
ev->type = LLC_SAP_EV_TYPE_PDU;
ev->reason = 0;
+ skb->sk = sk;
llc_sap_state_process(sap, skb);
}
@@ -360,8 +362,7 @@ static void llc_sap_mcast(struct llc_sap *sap,
break;
sock_hold(sk);
- skb_set_owner_r(skb1, sk);
- llc_sap_rcv(sap, skb1);
+ llc_sap_rcv(sap, skb1, sk);
sock_put(sk);
}
read_unlock_bh(&sap->sk_list.lock);
@@ -381,8 +382,7 @@ void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb)
} else {
struct sock *sk = llc_lookup_dgram(sap, &laddr);
if (sk) {
- skb_set_owner_r(skb, sk);
- llc_sap_rcv(sap, skb);
+ llc_sap_rcv(sap, skb, sk);
sock_put(sk);
} else
kfree_skb(skb);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 699d97b8de5e..a9fce4afdf21 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -672,7 +672,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (params->vlan) {
sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
- if (sdata->vif.type != IEEE80211_IF_TYPE_VLAN ||
+ if (sdata->vif.type != IEEE80211_IF_TYPE_VLAN &&
sdata->vif.type != IEEE80211_IF_TYPE_AP)
return -EINVAL;
} else
@@ -760,7 +760,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
if (params->vlan && params->vlan != sta->sdata->dev) {
vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
- if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN ||
+ if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN &&
vlansdata->vif.type != IEEE80211_IF_TYPE_AP) {
rcu_read_unlock();
return -EINVAL;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 915afadb0602..5c876450b14c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1313,7 +1313,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
/*
* Clear the TX filter mask for this STA when sending the next
* packet. If the STA went to power save mode, this will happen
- * happen when it wakes up for the next time.
+ * when it wakes up for the next time.
*/
sta->flags |= WLAN_STA_CLEAR_PS_FILT;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7cfd12e0d1e2..841278f1df8e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1325,7 +1325,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
- kmalloc(buf_size * sizeof(struct sk_buf *), GFP_ATOMIC);
+ kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
if (!tid_agg_rx->reorder_buf) {
if (net_ratelimit())
printk(KERN_ERR "can not allocate reordering buffer "
@@ -1334,7 +1334,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
goto end;
}
memset(tid_agg_rx->reorder_buf, 0,
- buf_size * sizeof(struct sk_buf *));
+ buf_size * sizeof(struct sk_buff *));
if (local->ops->ampdu_action)
ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
@@ -1614,7 +1614,7 @@ void sta_addba_resp_timer_expired(unsigned long data)
* only one argument, and both sta_info and TID are needed, so init
* flow in sta_info_create gives the TID as data, while the timer_to_id
* array gives the sta through container_of */
- u16 tid = *(int *)data;
+ u16 tid = *(u8 *)data;
struct sta_info *temp_sta = container_of((void *)data,
struct sta_info, timer_to_tid[tid]);
@@ -1662,7 +1662,7 @@ timer_expired_exit:
void sta_rx_agg_session_timer_expired(unsigned long data)
{
/* not an elegant detour, but there is no choice as the timer passes
- * only one argument, and verious sta_info are needed here, so init
+ * only one argument, and various sta_info are needed here, so init
* flow in sta_info_create gives the TID as data, while the timer_to_id
* array gives the sta through container_of */
u8 *ptid = (u8 *)data;
@@ -2479,8 +2479,6 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
ifsta->state = IEEE80211_IBSS_JOINED;
mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
- ieee80211_rx_bss_put(dev, bss);
-
return res;
}
@@ -3523,6 +3521,7 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
struct ieee80211_supported_band *sband;
u8 bssid[ETH_ALEN], *pos;
int i;
+ int ret;
DECLARE_MAC_BUF(mac);
#if 0
@@ -3567,7 +3566,9 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
*pos++ = (u8) (rate / 5);
}
- return ieee80211_sta_join_ibss(dev, ifsta, bss);
+ ret = ieee80211_sta_join_ibss(dev, ifsta, bss);
+ ieee80211_rx_bss_put(dev, bss);
+ return ret;
}
@@ -3615,10 +3616,13 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
(bss = ieee80211_rx_bss_get(dev, bssid,
local->hw.conf.channel->center_freq,
ifsta->ssid, ifsta->ssid_len))) {
+ int ret;
printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
" based on configured SSID\n",
dev->name, print_mac(mac, bssid));
- return ieee80211_sta_join_ibss(dev, ifsta, bss);
+ ret = ieee80211_sta_join_ibss(dev, ifsta, bss);
+ ieee80211_rx_bss_put(dev, bss);
+ return ret;
}
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG " did not try to join ibss\n");
@@ -4095,18 +4099,17 @@ ieee80211_sta_scan_result(struct net_device *dev,
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = bss->freq;
- iwe.u.freq.e = 6;
+ iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
+ iwe.u.freq.e = 0;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
IW_EV_FREQ_LEN);
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
- iwe.u.freq.e = 0;
+ iwe.u.freq.m = bss->freq;
+ iwe.u.freq.e = 6;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
IW_EV_FREQ_LEN);
-
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVQUAL;
iwe.u.qual.qual = bss->signal;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1958bfb361c6..0941e5d6a522 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1091,7 +1091,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
u16 fc, hdrlen, ethertype;
u8 *payload;
u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
+ u8 src[ETH_ALEN] __aligned(2);
struct sk_buff *skb = rx->skb;
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
DECLARE_MAC_BUF(mac);
@@ -1234,7 +1234,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
*/
static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx)
{
- static const u8 pae_group_addr[ETH_ALEN]
+ static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 131e9e6c8a32..4e97b266f907 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -34,11 +34,11 @@ void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-const unsigned char rfc1042_header[] =
+const unsigned char rfc1042_header[] __aligned(2) =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-const unsigned char bridge_tunnel_header[] =
+const unsigned char bridge_tunnel_header[] __aligned(2) =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 457ebf9e85ae..8311bb24f9f3 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -489,9 +489,14 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
- ap_addr->sa_family = ARPHRD_ETHER;
- memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN);
- return 0;
+ if (sdata->u.sta.state == IEEE80211_ASSOCIATED) {
+ ap_addr->sa_family = ARPHRD_ETHER;
+ memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN);
+ return 0;
+ } else {
+ memset(&ap_addr->sa_data, 0, ETH_ALEN);
+ return 0;
+ }
} else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
ap_addr->sa_family = ARPHRD_ETHER;
memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index e31beeb33b2b..e8f0dead267f 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -587,10 +587,10 @@ int __init nf_conntrack_expect_init(void)
return 0;
err3:
+ kmem_cache_destroy(nf_ct_expect_cachep);
+err2:
nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_vmalloc,
nf_ct_expect_hsize);
-err2:
- kmem_cache_destroy(nf_ct_expect_cachep);
err1:
return err;
}