78 files changed, 824 insertions, 400 deletions
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index c8bce82ddcac..89b1d196ca80 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt @@ -246,6 +246,7 @@ class/ devices/ firmware/ net/ +fs/ devices/ contains a filesystem representation of the device tree. It maps directly to the internal kernel device tree, which is a hierarchy of @@ -264,6 +265,10 @@ drivers/ contains a directory for each device driver that is loaded for devices on that particular bus (this assumes that drivers do not span multiple bus types). +fs/ contains a directory for some filesystems. Currently each +filesystem wanting to export attributes must create its own hierarchy +below fs/ (see ./fuse.txt for an example). + More information can driver-model specific features can be found in Documentation/driver-model/. diff --git a/MAINTAINERS b/MAINTAINERS index 4e1e8175eb6d..d6a8e5b434ec 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -421,6 +421,14 @@ L: linux-hams@vger.kernel.org W: http://www.baycom.org/~tom/ham/ham.html S: Maintained +BCM43XX WIRELESS DRIVER +P: Michael Buesch +M: mb@bu3sch.de +P: Stefano Brivio +M: st3@riseup.net +W: http://bcm43xx.berlios.de/ +S: Maintained + BEFS FILE SYSTEM P: Sergey S. Kostyliov M: rathamahata@php4.ru diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S index 338551c7113c..bbdef1be5f95 100644 --- a/arch/alpha/lib/strncpy.S +++ b/arch/alpha/lib/strncpy.S @@ -43,8 +43,8 @@ strncpy: .align 4 $multiword: - subq $24, 1, $2 # clear the final bits in the prev word - or $2, $24, $2 + subq $27, 1, $2 # clear the final bits in the prev word + or $2, $27, $2 zapnot $1, $2, $1 subq $18, 1, $18 @@ -70,8 +70,8 @@ $multiword: bne $18, 0b 1: ldq_u $1, 0($16) # clear the leading bits in the final word - subq $27, 1, $2 - or $2, $27, $2 + subq $24, 1, $2 + or $2, $24, $2 zap $1, $2, $1 stq_u $1, 0($16) diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 95a96275f88a..6f8e84c1c1f2 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -66,7 +66,7 @@ tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) - tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) ifeq ($(CONFIG_AEABI),y) -CFLAGS_ABI :=-mabi=aapcs -mno-thumb-interwork +CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork else CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,) endif diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 2ce0e3a27a45..a601b8b55f35 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -29,7 +29,7 @@ ifneq ($(CONFIG_ARCH_EBSA110),y) obj-y += io.o endif -head-y := head.o +head-y := head$(MMUEXT).o obj-$(CONFIG_DEBUG_LL) += debug.o extra-y := $(head-y) init_task.o vmlinux.lds diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index b093ab8738b5..0bea65864051 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -20,10 +20,11 @@ #include <asm/mach-types.h> #include <asm/procinfo.h> #include <asm/ptrace.h> -#include <asm/constants.h> +#include <asm/thread_info.h> #include <asm/system.h> #define PROCINFO_INITFUNC 12 +#define MACHINFO_TYPE 0 /* * Kernel startup entry point. 
@@ -79,5 +80,6 @@ __after_proc_init: mov pc, r13 @ clear the BSS and jump @ to start_kernel + .ltorg #include "head-common.S" diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c index febd115dba28..009038c8113e 100644 --- a/arch/arm/vfp/vfpdouble.c +++ b/arch/arm/vfp/vfpdouble.c @@ -197,7 +197,7 @@ u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exce dd, d, exceptions); vfp_put_double(dd, d); } - return exceptions & ~VFP_NAN_FLAG; + return exceptions; } /* diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 22f3da4e0829..37ff8145b5b5 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -180,7 +180,7 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) * emulate it. */ } - return exceptions; + return exceptions & ~VFP_NAN_FLAG; } /* diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c index 4ac27f193934..dae2c2f46052 100644 --- a/arch/arm/vfp/vfpsingle.c +++ b/arch/arm/vfp/vfpsingle.c @@ -203,7 +203,7 @@ u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exce vfp_put_float(sd, d); } - return exceptions & ~VFP_NAN_FLAG; + return exceptions; } /* diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index 9df87b03612c..c8547a6fa7e6 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -642,7 +642,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev) return; } -static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, +static int cacheinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index e30798811216..bcb80ca5cf40 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1610,5 +1610,6 @@ sys_call_table: data8 sys_get_robust_list data8 sys_sync_file_range // 1300 data8 sys_tee + data8 sys_vmsplice .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 6386f63c413e..859fb37ff49b 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu) } } -static int __devinit palinfo_cpu_callback(struct notifier_block *nfb, +static int palinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 9d5a823479a3..663a186ad194 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = { }; #ifdef CONFIG_HOTPLUG_CPU -static int __devinit +static int salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int i, cpu = (unsigned long)hcpu; diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index b47476d655f1..7da4739f536e 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -429,7 +429,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev) * When a cpu is hot-plugged, do a check and initiate * cache kobject if necessary */ -static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, +static int cache_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; diff --git 
a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 7c953bcc5f6a..a7d2bb3cf835 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -356,73 +356,13 @@ asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high, asmlinkage ssize_t sys32_pread(unsigned int fd, char __user * buf, size_t count, u32 unused, u64 a4, u64 a5) { - ssize_t ret; - struct file * file; - ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); - loff_t pos; - - ret = -EBADF; - file = fget(fd); - if (!file) - goto bad_file; - if (!(file->f_mode & FMODE_READ)) - goto out; - pos = merge_64(a4, a5); - ret = rw_verify_area(READ, file, &pos, count); - if (ret < 0) - goto out; - ret = -EINVAL; - if (!file->f_op || !(read = file->f_op->read)) - goto out; - if (pos < 0) - goto out; - ret = -ESPIPE; - if (!(file->f_mode & FMODE_PREAD)) - goto out; - ret = read(file, buf, count, &pos); - if (ret > 0) - dnotify_parent(file->f_dentry, DN_ACCESS); -out: - fput(file); -bad_file: - return ret; + return sys_pread64(fd, buf, count, merge_64(a4, a5)); } asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char __user * buf, size_t count, u32 unused, u64 a4, u64 a5) { - ssize_t ret; - struct file * file; - ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); - loff_t pos; - - ret = -EBADF; - file = fget(fd); - if (!file) - goto bad_file; - if (!(file->f_mode & FMODE_WRITE)) - goto out; - pos = merge_64(a4, a5); - ret = rw_verify_area(WRITE, file, &pos, count); - if (ret < 0) - goto out; - ret = -EINVAL; - if (!file->f_op || !(write = file->f_op->write)) - goto out; - if (pos < 0) - goto out; - - ret = -ESPIPE; - if (!(file->f_mode & FMODE_PWRITE)) - goto out; - - ret = write(file, buf, count, &pos); - if (ret > 0) - dnotify_parent(file->f_dentry, DN_MODIFY); -out: - fput(file); -bad_file: - return ret; + return sys_pwrite64(fd, buf, count, merge_64(a4, a5)); } asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid, diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 73560ef6f802..ed737cacf92d 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu) } #endif /* CONFIG_HOTPLUG_CPU */ -static int __devinit sysfs_cpu_notify(struct notifier_block *self, +static int sysfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; @@ -297,7 +297,7 @@ static int __devinit sysfs_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata sysfs_cpu_nb = { +static struct notifier_block sysfs_cpu_nb = { .notifier_call = sysfs_cpu_notify, }; diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 8d1522690501..0b98eea73c5e 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -324,6 +324,7 @@ COMPAT_SYS(ppoll) SYSCALL(unshare) SYSCALL(splice) SYSCALL(tee) +SYSCALL(vmsplice) /* * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index deb3afb94484..b283380a2a18 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -318,6 +318,7 @@ void *spu_syscall_table[] = { [__NR_unshare] sys_unshare, [__NR_splice] sys_splice, [__NR_tee] sys_tee, + [__NR_vmsplice] sys_vmsplice, }; long spu_sys_callback(struct spu_syscall_block *s) diff --git 
a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 54d35c130907..9a22434a580c 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -652,7 +652,7 @@ appldata_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata appldata_nb = { +static struct notifier_block appldata_nb = { .notifier_call = appldata_cpu_notify, }; diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index 6f0790e8b6d3..c69fc43cee7b 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c @@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu) #endif /* Get notified when a cpu comes on/off. Be hotplug friendly. */ -static __cpuinit int +static int mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c index d3ad7d81266d..d13b241ad094 100644 --- a/arch/x86_64/kernel/mce_amd.c +++ b/arch/x86_64/kernel/mce_amd.c @@ -482,7 +482,7 @@ static void threshold_remove_device(unsigned int cpu) #endif /* get notified when a cpu comes on/off */ -static __cpuinit int threshold_cpu_callback(struct notifier_block *nfb, +static int threshold_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { /* cpu was unsigned int to begin with */ diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 1755c053fd68..e5041a02e21f 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3385,7 +3385,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action, } -static struct notifier_block __devinitdata blk_cpu_notifier = { +static struct notifier_block blk_cpu_notifier = { .notifier_call = blk_cpu_notify, }; diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 915810f6237e..8c52421cbc54 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev) return 0; } -static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, +static int topology_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 66719f9d294c..1fa9fa157c12 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -27,6 +27,7 @@ #include <linux/crash_dump.h> #include <linux/backing-dev.h> #include <linux/bootmem.h> +#include <linux/pipe_fs_i.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -578,6 +579,18 @@ static ssize_t write_null(struct file * file, const char __user * buf, return count; } +static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf, + struct splice_desc *sd) +{ + return sd->len; +} + +static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out, + loff_t *ppos, size_t len, unsigned int flags) +{ + return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); +} + #ifdef CONFIG_MMU /* * For fun, we are using the MMU for this. 
@@ -785,6 +798,7 @@ static struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, .write = write_null, + .splice_write = splice_write_null, }; #if defined(CONFIG_ISA) || !defined(__mc68000__) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 9759d05b1972..29b2fa5534ae 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1497,7 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu) } EXPORT_SYMBOL(cpufreq_update_policy); -static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, +static int cpufreq_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index add8dc4aa7b0..c99e87838f92 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3768,6 +3768,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, ps_page->ps_page[j] = NULL; skb->len += length; skb->data_len += length; + skb->truesize += length; } copydone: diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 7627a75f4f7c..9788b1ef2e7d 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -105,6 +105,7 @@ * 0.50: 20 Jan 2006: Add 8021pq tagging support. * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. * 0.52: 20 Jan 2006: Add MSI/MSIX support. + * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. * * Known bugs: * We suspect that on some hardware no TX done interrupts are generated. @@ -116,7 +117,7 @@ * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few * superfluous timer interrupts from the nic. */ -#define FORCEDETH_VERSION "0.52" +#define FORCEDETH_VERSION "0.53" #define DRV_NAME "forcedeth" #include <linux/module.h> @@ -160,6 +161,7 @@ #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ #define DEV_HAS_MSI 0x0040 /* device supports MSI */ #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ +#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ enum { NvRegIrqStatus = 0x000, @@ -203,6 +205,8 @@ enum { #define NVREG_MISC1_HD 0x02 #define NVREG_MISC1_FORCE 0x3b0f3c + NvRegMacReset = 0x3c, +#define NVREG_MAC_RESET_ASSERT 0x0F3 NvRegTransmitterControl = 0x084, #define NVREG_XMITCTL_START 0x01 NvRegTransmitterStatus = 0x088, @@ -326,6 +330,10 @@ enum { NvRegMSIXMap0 = 0x3e0, NvRegMSIXMap1 = 0x3e4, NvRegMSIXIrqStatus = 0x3f0, + + NvRegPowerState2 = 0x600, +#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 +#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 }; /* Big endian: should work, but is untested */ @@ -414,7 +422,8 @@ typedef union _ring_type { #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) /* Miscelaneous hardware related defines: */ -#define NV_PCI_REGSZ 0x270 +#define NV_PCI_REGSZ_VER1 0x270 +#define NV_PCI_REGSZ_VER2 0x604 /* various timeout delays: all in usec */ #define NV_TXRX_RESET_DELAY 4 @@ -431,6 +440,7 @@ typedef union _ring_type { #define NV_MIIBUSY_DELAY 50 #define NV_MIIPHY_DELAY 10 #define NV_MIIPHY_DELAYMAX 10000 +#define NV_MAC_RESET_DELAY 64 #define NV_WAKEUPPATTERNS 5 #define NV_WAKEUPMASKENTRIES 4 @@ -552,6 +562,8 @@ struct fe_priv { u32 desc_ver; u32 txrxctl_bits; u32 vlanctl_bits; + u32 driver_data; + u32 register_size; void __iomem *base; @@ -919,6 +931,24 @@ static void nv_txrx_reset(struct net_device *dev) pci_push(base); } +static void nv_mac_reset(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 
__iomem *base = get_hwbase(dev); + + dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); + writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); + pci_push(base); + udelay(NV_MAC_RESET_DELAY); + writel(0, base + NvRegMacReset); + pci_push(base); + udelay(NV_MAC_RESET_DELAY); + writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); +} + /* * nv_get_stats: dev->get_stats function * Get latest stats value from the nic. @@ -1331,7 +1361,7 @@ static void nv_tx_timeout(struct net_device *dev) dev->name, (unsigned long)np->ring_addr, np->next_tx, np->nic_tx); printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); - for (i=0;i<0x400;i+= 32) { + for (i=0;i<=np->register_size;i+= 32) { printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", i, readl(base + i + 0), readl(base + i + 4), @@ -2488,11 +2518,11 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) } #define FORCEDETH_REGS_VER 1 -#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */ static int nv_get_regs_len(struct net_device *dev) { - return FORCEDETH_REGS_SIZE; + struct fe_priv *np = netdev_priv(dev); + return np->register_size; } static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) @@ -2504,7 +2534,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void regs->version = FORCEDETH_REGS_VER; spin_lock_irq(&np->lock); - for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++) + for (i = 0;i <= np->register_size/sizeof(u32); i++) rbuf[i] = readl(base + i*sizeof(u32)); spin_unlock_irq(&np->lock); } @@ -2608,6 +2638,8 @@ static int nv_open(struct net_device *dev) dprintk(KERN_DEBUG "nv_open: begin\n"); /* 1) erase previous misconfiguration */ + if (np->driver_data & DEV_HAS_POWER_CNTRL) + nv_mac_reset(dev); /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */ writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); writel(0, base + NvRegMulticastAddrB); @@ -2878,6 +2910,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i unsigned long addr; u8 __iomem *base; int err, i; + u32 powerstate; dev = alloc_etherdev(sizeof(struct fe_priv)); err = -ENOMEM; @@ -2910,6 +2943,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (err < 0) goto out_disable; + if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL)) + np->register_size = NV_PCI_REGSZ_VER2; + else + np->register_size = NV_PCI_REGSZ_VER1; + err = -EINVAL; addr = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -2918,7 +2956,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i pci_resource_len(pci_dev, i), pci_resource_flags(pci_dev, i)); if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && - pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) { + pci_resource_len(pci_dev, i) >= np->register_size) { addr = pci_resource_start(pci_dev, i); break; } @@ -2929,6 +2967,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_relreg; } + /* copy of driver data */ + np->driver_data = id->driver_data; + /* handle different descriptor versions */ if (id->driver_data & DEV_HAS_HIGH_DMA) { /* packet format 3: supports 40-bit addressing */ @@ -2986,7 +3027,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i } err = -ENOMEM; - np->base = ioremap(addr, 
NV_PCI_REGSZ); + np->base = ioremap(addr, np->register_size); if (!np->base) goto out_relreg; dev->base_addr = (unsigned long)np->base; @@ -3062,6 +3103,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i writel(0, base + NvRegWakeUpFlags); np->wolenabled = 0; + if (id->driver_data & DEV_HAS_POWER_CNTRL) { + u8 revision_id; + pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id); + + /* take phy and nic out of low power mode */ + powerstate = readl(base + NvRegPowerState2); + powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; + if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || + id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && + revision_id >= 0xA3) + powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; + writel(powerstate, base + NvRegPowerState2); + } + if (np->desc_ver == DESC_VER_1) { np->tx_flags = NV_TX_VALID; } else { @@ -3223,19 +3278,19 @@ static struct pci_device_id pci_tbl[] = { }, { /* MCP51 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, }, { /* MCP51 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, }, { /* MCP55 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL, }, { /* MCP55 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL, }, {0,}, }; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 771e25d8c417..218d31764c52 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -210,7 +210,8 @@ static int gfar_probe(struct platform_device *pdev) goto regs_fail; } - spin_lock_init(&priv->lock); + spin_lock_init(&priv->txlock); + spin_lock_init(&priv->rxlock); platform_set_drvdata(pdev, dev); @@ -515,11 +516,13 @@ void stop_gfar(struct net_device *dev) phy_stop(priv->phydev); /* Lock it down */ - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); + spin_lock(&priv->rxlock); gfar_halt(dev); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock(&priv->rxlock); + spin_unlock_irqrestore(&priv->txlock, flags); /* Free the IRQs */ if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { @@ -605,14 +608,15 @@ void gfar_start(struct net_device *dev) tempval |= DMACTRL_INIT_SETTINGS; gfar_write(&priv->regs->dmactrl, tempval); - /* Clear THLT, so that the DMA starts polling now */ - gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); - /* Make sure we aren't stopped */ tempval = gfar_read(&priv->regs->dmactrl); tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); gfar_write(&priv->regs->dmactrl, tempval); + /* Clear THLT/RHLT,
so that the DMA starts polling now */ + gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); + gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); + /* Unmask the interrupts we look for */ gfar_write(&regs->imask, IMASK_DEFAULT); } @@ -928,12 +932,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) struct txfcb *fcb = NULL; struct txbd8 *txbdp; u16 status; + unsigned long flags; /* Update transmit stats */ priv->stats.tx_bytes += skb->len; /* Lock priv now */ - spin_lock_irq(&priv->lock); + spin_lock_irqsave(&priv->txlock, flags); /* Point at the first free tx descriptor */ txbdp = priv->cur_tx; @@ -1004,7 +1009,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); /* Unlock priv */ - spin_unlock_irq(&priv->lock); + spin_unlock_irqrestore(&priv->txlock, flags); return 0; } @@ -1049,7 +1054,7 @@ static void gfar_vlan_rx_register(struct net_device *dev, unsigned long flags; u32 tempval; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->rxlock, flags); priv->vlgrp = grp; @@ -1076,7 +1081,7 @@ static void gfar_vlan_rx_register(struct net_device *dev, gfar_write(&priv->regs->rctrl, tempval); } - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->rxlock, flags); } @@ -1085,12 +1090,12 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) { struct gfar_private *priv = netdev_priv(dev); unsigned long flags; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->rxlock, flags); if (priv->vlgrp) priv->vlgrp->vlan_devices[vid] = NULL; - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->rxlock, flags); } @@ -1179,7 +1184,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs) gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); /* Lock priv */ - spin_lock(&priv->lock); + spin_lock(&priv->txlock); bdp = priv->dirty_tx; while ((bdp->status & TXBD_READY) == 0) { /* If dirty_tx and cur_tx are the same, then either the */ @@ -1224,7 +1229,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs) else gfar_write(&priv->regs->txic, 0); - spin_unlock(&priv->lock); + spin_unlock(&priv->txlock); return IRQ_HANDLED; } @@ -1305,9 +1310,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs) { struct net_device *dev = (struct net_device *) dev_id; struct gfar_private *priv = netdev_priv(dev); - #ifdef CONFIG_GFAR_NAPI u32 tempval; +#else + unsigned long flags; #endif /* Clear IEVENT, so rx interrupt isn't called again @@ -1330,7 +1336,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs) } #else - spin_lock(&priv->lock); + spin_lock_irqsave(&priv->rxlock, flags); gfar_clean_rx_ring(dev, priv->rx_ring_size); /* If we are coalescing interrupts, update the timer */ @@ -1341,7 +1347,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs) else gfar_write(&priv->regs->rxic, 0); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->rxlock, flags); #endif return IRQ_HANDLED; @@ -1490,13 +1496,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) /* Update the current rxbd pointer to be the next one */ priv->cur_rx = bdp; - /* If no packets have arrived since the - * last one we processed, clear the IEVENT RX and - * BSY bits so that another interrupt won't be - * generated when we set IMASK */ - if (bdp->status & RXBD_EMPTY) - gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); - return howmany; } @@ -1516,7 +1515,7 @@
static int gfar_poll(struct net_device *dev, int *budget) rx_work_limit -= howmany; *budget -= howmany; - if (rx_work_limit >= 0) { + if (rx_work_limit > 0) { netif_rx_complete(dev); /* Clear the halt bit in RSTAT */ @@ -1533,7 +1532,8 @@ static int gfar_poll(struct net_device *dev, int *budget) gfar_write(&priv->regs->rxic, 0); } - return (rx_work_limit < 0) ? 1 : 0; + /* Return 1 if there's more work to do */ + return (rx_work_limit > 0) ? 0 : 1; } #endif @@ -1629,7 +1629,7 @@ static void adjust_link(struct net_device *dev) struct phy_device *phydev = priv->phydev; int new_state = 0; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); if (phydev->link) { u32 tempval = gfar_read(&regs->maccfg2); u32 ecntrl = gfar_read(&regs->ecntrl); @@ -1694,7 +1694,7 @@ static void adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->txlock, flags); } /* Update the hash table based on the current list of multicast diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index d37d5401be6e..127c98cf3336 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -656,43 +656,62 @@ struct gfar { * the buffer descriptor determines the actual condition. */ struct gfar_private { - /* pointers to arrays of skbuffs for tx and rx */ + /* Fields controlled by TX lock */ + spinlock_t txlock; + + /* Pointer to the array of skbuffs */ struct sk_buff ** tx_skbuff; - struct sk_buff ** rx_skbuff; - /* indices pointing to the next free sbk in skb arrays */ + /* next free skb in the array */ u16 skb_curtx; - u16 skb_currx; - /* index of the first skb which hasn't been transmitted - * yet. */ + /* First skb in line to be transmitted */ u16 skb_dirtytx; /* Configuration info for the coalescing features */ unsigned char txcoalescing; unsigned short txcount; unsigned short txtime; + + /* Buffer descriptor pointers */ + struct txbd8 *tx_bd_base; /* First tx buffer descriptor */ + struct txbd8 *cur_tx; /* Next free ring entry */ + struct txbd8 *dirty_tx; /* First buffer in line + to be transmitted */ + unsigned int tx_ring_size; + + /* RX Locked fields */ + spinlock_t rxlock; + + /* skb array and index */ + struct sk_buff ** rx_skbuff; + u16 skb_currx; + + /* RX Coalescing values */ unsigned char rxcoalescing; unsigned short rxcount; unsigned short rxtime; - /* GFAR addresses */ - struct rxbd8 *rx_bd_base; /* Base addresses of Rx and Tx Buffers */ - struct txbd8 *tx_bd_base; + struct rxbd8 *rx_bd_base; /* First Rx buffers */ struct rxbd8 *cur_rx; /* Next free rx ring entry */ - struct txbd8 *cur_tx; /* Next free ring entry */ - struct txbd8 *dirty_tx; /* The Ring entry to be freed.
*/ - struct gfar __iomem *regs; /* Pointer to the GFAR memory mapped Registers */ - u32 __iomem *hash_regs[16]; - int hash_width; - struct net_device_stats stats; /* linux network statistics */ - struct gfar_extra_stats extra_stats; - spinlock_t lock; + + /* RX parameters */ + unsigned int rx_ring_size; unsigned int rx_buffer_size; unsigned int rx_stash_size; unsigned int rx_stash_index; - unsigned int tx_ring_size; - unsigned int rx_ring_size; + + struct vlan_group *vlgrp; + + /* Unprotected fields */ + /* Pointer to the GFAR memory mapped Registers */ + struct gfar __iomem *regs; + + /* Hash registers and their width */ + u32 __iomem *hash_regs[16]; + int hash_width; + + /* global parameters */ unsigned int fifo_threshold; unsigned int fifo_starve; unsigned int fifo_starve_off; @@ -702,13 +721,15 @@ struct gfar_private { extended_hash:1, bd_stash_en:1; unsigned short padding; - struct vlan_group *vlgrp; - /* Info structure initialized by board setup code */ + unsigned int interruptTransmit; unsigned int interruptReceive; unsigned int interruptError; + + /* info structure initialized by platform code */ struct gianfar_platform_data *einfo; + /* PHY stuff */ struct phy_device *phydev; struct mii_bus *mii_bus; int oldspeed; @@ -716,6 +737,10 @@ struct gfar_private { int oldlink; uint32_t msg_enable; + + /* Network Statistics */ + struct net_device_stats stats; + struct gfar_extra_stats extra_stats; }; static inline u32 gfar_read(volatile unsigned __iomem *addr) diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 5de7b2e259dc..d69698c695ef 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -455,10 +455,14 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva /* Halt TX and RX, and process the frames which * have already been received */ - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); + spin_lock(&priv->rxlock); + gfar_halt(dev); gfar_clean_rx_ring(dev, priv->rx_ring_size); - spin_unlock_irqrestore(&priv->lock, flags); + + spin_unlock(&priv->rxlock); + spin_unlock_irqrestore(&priv->txlock, flags); /* Now we take down the rings to rebuild them */ stop_gfar(dev); @@ -488,10 +492,14 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data) /* Halt TX and RX, and process the frames which * have already been received */ - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); + spin_lock(&priv->rxlock); + gfar_halt(dev); gfar_clean_rx_ring(dev, priv->rx_ring_size); - spin_unlock_irqrestore(&priv->lock, flags); + + spin_unlock(&priv->rxlock); + spin_unlock_irqrestore(&priv->txlock, flags); /* Now we take down the rings to rebuild them */ stop_gfar(dev); @@ -523,7 +531,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data) if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) return -EOPNOTSUPP; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); gfar_halt(dev); if (data) @@ -532,7 +540,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data) dev->features &= ~NETIF_F_IP_CSUM; gfar_start(dev); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->txlock, flags); return 0; } diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c index 51ef181b1368..a6d5c43199cb 100644 --- a/drivers/net/gianfar_sysfs.c +++ b/drivers/net/gianfar_sysfs.c @@ -82,7 +82,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev, else return count; 
- spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->rxlock, flags); /* Set the new stashing value */ priv->bd_stash_en = new_setting; @@ -96,7 +96,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev, gfar_write(&priv->regs->attr, temp); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->rxlock, flags); return count; } @@ -118,7 +118,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev, u32 temp; unsigned long flags; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->rxlock, flags); if (length > priv->rx_buffer_size) return count; @@ -142,7 +142,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev, gfar_write(&priv->regs->attr, temp); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->rxlock, flags); return count; } @@ -166,7 +166,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev, u32 temp; unsigned long flags; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->rxlock, flags); if (index > priv->rx_stash_size) return count; @@ -180,7 +180,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev, temp |= ATTRELI_EI(index); gfar_write(&priv->regs->attreli, flags); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->rxlock, flags); return count; } @@ -205,7 +205,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev, if (length > GFAR_MAX_FIFO_THRESHOLD) return count; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); priv->fifo_threshold = length; @@ -214,7 +214,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev, temp |= length; gfar_write(&priv->regs->fifo_tx_thr, temp); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->txlock, flags); return count; } @@ -240,7 +240,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev, if (num > GFAR_MAX_FIFO_STARVE) return count; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); priv->fifo_starve = num; @@ -249,7 +249,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev, temp |= num; gfar_write(&priv->regs->fifo_tx_starve, temp); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->txlock, flags); return count; } @@ -274,7 +274,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev, if (num > GFAR_MAX_FIFO_STARVE_OFF) return count; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->txlock, flags); priv->fifo_starve_off = num; @@ -283,7 +283,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev, temp |= num; gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->txlock, flags); return count; } diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 67b0eab16589..227df9876a2c 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -51,7 +51,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.1" +#define DRV_VERSION "1.2" #define PFX DRV_NAME " " /* @@ -925,8 +925,7 @@ static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask) skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask); if (likely(skb)) { unsigned long p = (unsigned long) skb->data; - skb_reserve(skb, - ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p); + skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p); } return skb; @@ -1686,13 +1685,12 @@ static void 
sky2_tx_timeout(struct net_device *dev) } -#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* Want receive buffer size to be multiple of 64 bits * and incl room for vlan and truncation */ static inline unsigned sky2_buf_size(int mtu) { - return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8; + return ALIGN(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8; } static int sky2_change_mtu(struct net_device *dev, int new_mtu) @@ -2086,6 +2084,20 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, } } +/* If idle then force a fake soft NAPI poll once a second + * to work around cases where sharing an edge triggered interrupt. + */ +static void sky2_idle(unsigned long arg) +{ + struct net_device *dev = (struct net_device *) arg; + + local_irq_disable(); + if (__netif_rx_schedule_prep(dev)) + __netif_rx_schedule(dev); + local_irq_enable(); +} + + static int sky2_poll(struct net_device *dev0, int *budget) { struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; @@ -2093,6 +2105,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) int work_done = 0; u32 status = sky2_read32(hw, B0_Y2_SP_EISR); + restart_poll: if (unlikely(status & ~Y2_IS_STAT_BMU)) { if (status & Y2_IS_HW_ERR) sky2_hw_intr(hw); @@ -2123,7 +2136,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) } if (status & Y2_IS_STAT_BMU) { - work_done = sky2_status_intr(hw, work_limit); + work_done += sky2_status_intr(hw, work_limit - work_done); *budget -= work_done; dev0->quota -= work_done; @@ -2133,9 +2146,24 @@ static int sky2_poll(struct net_device *dev0, int *budget) sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); } - netif_rx_complete(dev0); + mod_timer(&hw->idle_timer, jiffies + HZ); + + local_irq_disable(); + __netif_rx_complete(dev0); status = sky2_read32(hw, B0_Y2_SP_LISR); + + if (unlikely(status)) { + /* More work pending, try and keep going */ + if (__netif_rx_schedule_prep(dev0)) { + __netif_rx_reschedule(dev0, work_done); + status = sky2_read32(hw, B0_Y2_SP_EISR); + local_irq_enable(); + goto restart_poll; + } + } + + local_irq_enable(); return 0; } @@ -2153,8 +2181,6 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) prefetch(&hw->st_le[hw->st_idx]); if (likely(__netif_rx_schedule_prep(dev0))) __netif_rx_schedule(dev0); - else - printk(KERN_DEBUG PFX "irq race detected\n"); return IRQ_HANDLED; } @@ -2193,7 +2219,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) } -static int sky2_reset(struct sky2_hw *hw) +static int __devinit sky2_reset(struct sky2_hw *hw) { u16 status; u8 t8, pmd_type; @@ -3276,6 +3302,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev, sky2_write32(hw, B0_IMSK, Y2_IS_BASE); + setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev); + pci_set_drvdata(pdev, hw); return 0; @@ -3311,13 +3339,15 @@ static void __devexit sky2_remove(struct pci_dev *pdev) if (!hw) return; + del_timer_sync(&hw->idle_timer); + + sky2_write32(hw, B0_IMSK, 0); dev0 = hw->dev[0]; dev1 = hw->dev[1]; if (dev1) unregister_netdev(dev1); unregister_netdev(dev0); - sky2_write32(hw, B0_IMSK, 0); sky2_set_power_state(hw, PCI_D3hot); sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); sky2_write8(hw, B0_CTST, CS_RST_SET); diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 89dd18cd12f0..b026f5653f04 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -1880,6 +1880,8 @@ struct sky2_hw { struct sky2_status_le *st_le; u32 st_idx; dma_addr_t st_dma; + + struct timer_list idle_timer; int msi_detected; wait_queue_head_t msi_wait; }; diff --git 
a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h index 2d520e4b0276..b7d77638ba8c 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h @@ -213,6 +213,14 @@ static inline void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring) { } +static inline +void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring) +{ +} +static inline +void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring) +{ +} #endif /* CONFIG_BCM43XX_DMA */ #endif /* BCM43xx_DMA_H_ */ diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c index c59ddd40680d..0aa1bd269a25 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c @@ -27,6 +27,7 @@ #include "bcm43xx_pio.h" #include "bcm43xx_main.h" #include "bcm43xx_xmit.h" +#include "bcm43xx_power.h" #include <linux/delay.h> @@ -44,10 +45,10 @@ static void tx_octet(struct bcm43xx_pioqueue *queue, bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, octet); bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, - BCM43xx_PIO_TXCTL_WRITEHI); + BCM43xx_PIO_TXCTL_WRITELO); } else { bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, - BCM43xx_PIO_TXCTL_WRITEHI); + BCM43xx_PIO_TXCTL_WRITELO); bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, octet); } @@ -103,7 +104,7 @@ static void tx_complete(struct bcm43xx_pioqueue *queue, bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, skb->data[skb->len - 1]); bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, - BCM43xx_PIO_TXCTL_WRITEHI | + BCM43xx_PIO_TXCTL_WRITELO | BCM43xx_PIO_TXCTL_COMPLETE); } else { bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, @@ -112,9 +113,10 @@ static void tx_complete(struct bcm43xx_pioqueue *queue, } static u16 generate_cookie(struct bcm43xx_pioqueue *queue, - int packetindex) + struct bcm43xx_pio_txpacket *packet) { u16 cookie = 0x0000; + int packetindex; /* We use the upper 4 bits for the PIO * controller ID and the lower 12 bits @@ -135,6 +137,7 @@ static u16 generate_cookie(struct bcm43xx_pioqueue *queue, default: assert(0); } + packetindex = pio_txpacket_getindex(packet); assert(((u16)packetindex & 0xF000) == 0x0000); cookie |= (u16)packetindex; @@ -184,7 +187,7 @@ static void pio_tx_write_fragment(struct bcm43xx_pioqueue *queue, bcm43xx_generate_txhdr(queue->bcm, &txhdr, skb->data, skb->len, (packet->xmitted_frags == 0), - generate_cookie(queue, pio_txpacket_getindex(packet))); + generate_cookie(queue, packet)); tx_start(queue); octets = skb->len + sizeof(txhdr); @@ -241,7 +244,7 @@ static int pio_tx_packet(struct bcm43xx_pio_txpacket *packet) queue->tx_devq_packets++; queue->tx_devq_used += octets; - assert(packet->xmitted_frags <= packet->txb->nr_frags); + assert(packet->xmitted_frags < packet->txb->nr_frags); packet->xmitted_frags++; packet->xmitted_octets += octets; } @@ -257,8 +260,14 @@ static void tx_tasklet(unsigned long d) unsigned long flags; struct bcm43xx_pio_txpacket *packet, *tmp_packet; int err; + u16 txctl; bcm43xx_lock_mmio(bcm, flags); + + txctl = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL); + if (txctl & BCM43xx_PIO_TXCTL_SUSPEND) + goto out_unlock; + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { assert(packet->xmitted_frags < packet->txb->nr_frags); if (packet->xmitted_frags == 0) { @@ -288,6 +297,7 @@ static void tx_tasklet(unsigned long d) next_packet: continue; } +out_unlock: bcm43xx_unlock_mmio(bcm, flags); } @@ -330,12 +340,19 @@ struct bcm43xx_pioqueue * bcm43xx_setup_pioqueue(struct bcm43xx_private *bcm, (unsigned long)queue); 
value = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); - value |= BCM43xx_SBF_XFER_REG_BYTESWAP; + value &= ~BCM43xx_SBF_XFER_REG_BYTESWAP; bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value); qsize = bcm43xx_read16(bcm, queue->mmio_base + BCM43xx_PIO_TXQBUFSIZE); + if (qsize == 0) { + printk(KERN_ERR PFX "ERROR: This card does not support PIO " + "operation mode. Please use DMA mode " + "(module parameter pio=0).\n"); + goto err_freequeue; + } if (qsize <= BCM43xx_PIO_TXQADJUST) { - printk(KERN_ERR PFX "PIO tx device-queue too small (%u)\n", qsize); + printk(KERN_ERR PFX "PIO tx device-queue too small (%u)\n", + qsize); goto err_freequeue; } qsize -= BCM43xx_PIO_TXQADJUST; @@ -444,15 +461,10 @@ int bcm43xx_pio_tx(struct bcm43xx_private *bcm, { struct bcm43xx_pioqueue *queue = bcm43xx_current_pio(bcm)->queue1; struct bcm43xx_pio_txpacket *packet; - u16 tmp; assert(!queue->tx_suspended); assert(!list_empty(&queue->txfree)); - tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL); - if (tmp & BCM43xx_PIO_TXCTL_SUSPEND) - return -EBUSY; - packet = list_entry(queue->txfree.next, struct bcm43xx_pio_txpacket, list); packet->txb = txb; packet->xmitted_frags = 0; @@ -462,7 +474,7 @@ int bcm43xx_pio_tx(struct bcm43xx_private *bcm, assert(queue->nr_txfree < BCM43xx_PIO_MAXTXPACKETS); /* Suspend TX, if we are out of packets in the "free" queue. */ - if (unlikely(list_empty(&queue->txfree))) { + if (list_empty(&queue->txfree)) { netif_stop_queue(queue->bcm->net_dev); queue->tx_suspended = 1; } @@ -480,15 +492,15 @@ void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm, queue = parse_cookie(bcm, status->cookie, &packet); assert(queue); -//TODO -if (!queue) -return; + free_txpacket(packet, 1); - if (unlikely(queue->tx_suspended)) { + if (queue->tx_suspended) { queue->tx_suspended = 0; netif_wake_queue(queue->bcm->net_dev); } - /* If there are packets on the txqueue, poke the tasklet. */ + /* If there are packets on the txqueue, poke the tasklet + * to transmit them. + */ if (!list_empty(&queue->txqueue)) tasklet_schedule(&queue->txtask); } @@ -519,12 +531,9 @@ void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue) int i, preamble_readwords; struct sk_buff *skb; -return; tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL); - if (!(tmp & BCM43xx_PIO_RXCTL_DATAAVAILABLE)) { - dprintkl(KERN_ERR PFX "PIO RX: No data available\n");//TODO: remove this printk. + if (!(tmp & BCM43xx_PIO_RXCTL_DATAAVAILABLE)) return; - } bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL, BCM43xx_PIO_RXCTL_DATAAVAILABLE); @@ -538,8 +547,7 @@ return; return; data_ready: -//FIXME: endianess in this function. - len = le16_to_cpu(bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA)); + len = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA); if (unlikely(len > 0x700)) { pio_rx_error(queue, 0, "len > 0x700"); return; @@ -555,7 +563,7 @@ data_ready: preamble_readwords = 18 / sizeof(u16); for (i = 0; i < preamble_readwords; i++) { tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA); - preamble[i + 1] = cpu_to_be16(tmp);//FIXME? 
+ preamble[i + 1] = cpu_to_le16(tmp); } rxhdr = (struct bcm43xx_rxhdr *)preamble; rxflags2 = le16_to_cpu(rxhdr->flags2); @@ -591,16 +599,40 @@ data_ready: } skb_put(skb, len); for (i = 0; i < len - 1; i += 2) { - tmp = cpu_to_be16(bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA)); - *((u16 *)(skb->data + i)) = tmp; + tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA); + *((u16 *)(skb->data + i)) = cpu_to_le16(tmp); } if (len % 2) { tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA); skb->data[len - 1] = (tmp & 0x00FF); +/* The specs say the following is required, but + * it is wrong and corrupts the PLCP. If we don't do + * this, the PLCP seems to be correct. So ifdef it out for now. + */ +#if 0 if (rxflags2 & BCM43xx_RXHDR_FLAGS2_TYPE2FRAME) - skb->data[0x20] = (tmp & 0xFF00) >> 8; + skb->data[2] = (tmp & 0xFF00) >> 8; else - skb->data[0x1E] = (tmp & 0xFF00) >> 8; + skb->data[0] = (tmp & 0xFF00) >> 8; +#endif } + skb_trim(skb, len - IEEE80211_FCS_LEN); bcm43xx_rx(queue->bcm, skb, rxhdr); } + +void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue) +{ + bcm43xx_power_saving_ctl_bits(queue->bcm, -1, 1); + bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, + bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL) + | BCM43xx_PIO_TXCTL_SUSPEND); +} + +void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue) +{ + bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, + bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL) + & ~BCM43xx_PIO_TXCTL_SUSPEND); + bcm43xx_power_saving_ctl_bits(queue->bcm, -1, -1); + tasklet_schedule(&queue->txtask); +} diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h b/drivers/net/wireless/bcm43xx/bcm43xx_pio.h index 970627bc1769..dfc78209e3a3 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.h @@ -14,8 +14,8 @@ #define BCM43xx_PIO_RXCTL 0x08 #define BCM43xx_PIO_RXDATA 0x0A -#define BCM43xx_PIO_TXCTL_WRITEHI (1 << 0) -#define BCM43xx_PIO_TXCTL_WRITELO (1 << 1) +#define BCM43xx_PIO_TXCTL_WRITELO (1 << 0) +#define BCM43xx_PIO_TXCTL_WRITEHI (1 << 1) #define BCM43xx_PIO_TXCTL_COMPLETE (1 << 2) #define BCM43xx_PIO_TXCTL_INIT (1 << 3) #define BCM43xx_PIO_TXCTL_SUSPEND (1 << 7) @@ -95,6 +95,7 @@ void bcm43xx_pio_write(struct bcm43xx_pioqueue *queue, u16 offset, u16 value) { bcm43xx_write16(queue->bcm, queue->mmio_base + offset, value); + mmiowb(); } @@ -107,6 +108,9 @@ void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm, struct bcm43xx_xmitstatus *status); void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue); +void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue); +void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue); + #else /* CONFIG_BCM43XX_PIO */ static inline @@ -133,6 +137,14 @@ static inline void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue) { } +static inline +void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue) +{ +} +static inline +void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue) +{ +} #endif /* CONFIG_BCM43XX_PIO */ #endif /* BCM43xx_PIO_H_ */ diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index 8b37e824dfcb..8399de581893 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -1860,7 +1860,7 @@ static char * __prism2_translate_scan(local_info_t *local, memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWFREQ; if (scan) { - chan = scan->chid; + chan = le16_to_cpu(scan->chid); } else if (bss) { chan = bss->chan; } else { @@ -1868,7 +1868,7 @@ static char * __prism2_translate_scan(local_info_t *local, } if 
(chan > 0) { - iwe.u.freq.m = freq_list[le16_to_cpu(chan - 1)] * 100000; + iwe.u.freq.m = freq_list[chan - 1] * 100000; iwe.u.freq.e = 1; current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index 42b457030b03..0eb010a3f5bc 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1614,6 +1614,7 @@ static int activate_ep_files (struct dev_data *dev) data, &ep_config_operations, &data->dentry); if (!data->inode) { + usb_ep_free_request(ep, data->req); kfree (data); goto enomem; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index aaf151cb5822..d2ec806a4f32 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3447,10 +3447,13 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, pSesInfo->server->secMode, pSesInfo->server->capabilities, pSesInfo->server->timeZone)); +#ifdef CONFIG_CIFS_EXPERIMENTAL if(experimEnabled > 1) rc = CIFS_SessSetup(xid, pSesInfo, CIFS_NTLM /* type */, &ntlmv2_flag, nls_info); - else if (extended_security + else +#endif + if (extended_security && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) && (pSesInfo->server->secType == NTLMSSP)) { cFYI(1, ("New style sesssetup")); diff --git a/fs/compat.c b/fs/compat.c index 7f8e26ea427c..2e32bd340474 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1217,6 +1217,10 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, if (ret < 0) goto out; + ret = security_file_permission(file, type == READ ? MAY_READ:MAY_WRITE); + if (ret) + goto out; + fnv = NULL; if (type == READ) { fn = file->f_op->read; diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index aaf1da17b6d4..8c22aa9a7fbb 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c @@ -48,6 +48,7 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, if (!S_ISDIR(inode->i_mode)) flags &= ~EXT3_DIRSYNC_FL; + mutex_lock(&inode->i_mutex); oldflags = ei->i_flags; /* The JOURNAL_DATA flag is modifiable only by root */ @@ -60,8 +61,10 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, * This test looks nicer. Thanks to Pauline Middelink */ if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) { - if (!capable(CAP_LINUX_IMMUTABLE)) + if (!capable(CAP_LINUX_IMMUTABLE)) { + mutex_unlock(&inode->i_mutex); return -EPERM; + } } /* @@ -69,14 +72,18 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, * the relevant capability. 
*/ if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) { - if (!capable(CAP_SYS_RESOURCE)) + if (!capable(CAP_SYS_RESOURCE)) { + mutex_unlock(&inode->i_mutex); return -EPERM; + } } handle = ext3_journal_start(inode, 1); - if (IS_ERR(handle)) + if (IS_ERR(handle)) { + mutex_unlock(&inode->i_mutex); return PTR_ERR(handle); + } if (IS_SYNC(inode)) handle->h_sync = 1; err = ext3_reserve_inode_write(handle, inode, &iloc); @@ -93,11 +100,14 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, err = ext3_mark_iloc_dirty(handle, inode, &iloc); flags_err: ext3_journal_stop(handle); - if (err) + if (err) { + mutex_unlock(&inode->i_mutex); return err; + } if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) err = ext3_change_inode_journal_flag(inode, jflag); + mutex_unlock(&inode->i_mutex); return err; } case EXT3_IOC_GETVERSION: diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index c5ffa8523968..8aac5334680d 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c @@ -213,7 +213,7 @@ static int setup_new_group_blocks(struct super_block *sb, goto exit_bh; } lock_buffer(bh); - memcpy(gdb->b_data, sbi->s_group_desc[i], bh->b_size); + memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, bh->b_size); set_buffer_uptodate(gdb); unlock_buffer(bh); ext3_journal_dirty_metadata(handle, gdb); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index cc750c68fe70..104a62dadb94 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -128,14 +128,24 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) } } -void fuse_remove_background(struct fuse_conn *fc, struct fuse_req *req) +/* + * Called with sbput_sem held for read (request_end) or write + * (fuse_put_super). By the time fuse_put_super() is finished, all + * inodes belonging to background requests must be released, so the + * iputs have to be done within the locked region. 
+ */ +void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req) { - list_del_init(&req->bg_entry); + iput(req->inode); + iput(req->inode2); + spin_lock(&fc->lock); + list_del(&req->bg_entry); if (fc->num_background == FUSE_MAX_BACKGROUND) { fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } fc->num_background--; + spin_unlock(&fc->lock); } /* @@ -165,27 +175,22 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) wake_up(&req->waitq); fuse_put_request(fc, req); } else { - struct inode *inode = req->inode; - struct inode *inode2 = req->inode2; - struct file *file = req->file; void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; req->end = NULL; - req->inode = NULL; - req->inode2 = NULL; - req->file = NULL; - if (!list_empty(&req->bg_entry)) - fuse_remove_background(fc, req); spin_unlock(&fc->lock); + down_read(&fc->sbput_sem); + if (fc->mounted) + fuse_release_background(fc, req); + up_read(&fc->sbput_sem); + + /* fput must go outside sbput_sem, otherwise it can deadlock */ + if (req->file) + fput(req->file); if (end) end(fc, req); else fuse_put_request(fc, req); - - if (file) - fput(file); - iput(inode); - iput(inode2); } } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 59661c481d9d..0474202cb5dc 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -258,9 +258,15 @@ struct fuse_conn { /** waitq for blocked connection */ wait_queue_head_t blocked_waitq; + /** RW semaphore for exclusion with fuse_put_super() */ + struct rw_semaphore sbput_sem; + /** The next unique request id */ u64 reqctr; + /** Mount is active */ + unsigned mounted; + /** Connection established, cleared on umount, connection abort and device release */ unsigned connected; @@ -471,11 +477,11 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req); void request_send_background(struct fuse_conn *fc, struct fuse_req *req); /** - * Remove request from the the background list + * Release inodes and file associated with background request */ -void fuse_remove_background(struct fuse_conn *fc, struct fuse_req *req); +void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req); -/** Abort all requests */ +/* Abort all requests */ void fuse_abort_conn(struct fuse_conn *fc); /** diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 43a6fc0db8a7..7627022446b2 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -204,26 +204,17 @@ static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); + down_write(&fc->sbput_sem); + while (!list_empty(&fc->background)) + fuse_release_background(fc, + list_entry(fc->background.next, + struct fuse_req, bg_entry)); + spin_lock(&fc->lock); + fc->mounted = 0; fc->connected = 0; - while (!list_empty(&fc->background)) { - struct fuse_req *req = list_entry(fc->background.next, - struct fuse_req, bg_entry); - struct inode *inode = req->inode; - struct inode *inode2 = req->inode2; - - /* File would hold a reference to vfsmount */ - BUG_ON(req->file); - req->inode = NULL; - req->inode2 = NULL; - fuse_remove_background(fc, req); - - spin_unlock(&fc->lock); - iput(inode); - iput(inode2); - spin_lock(&fc->lock); - } spin_unlock(&fc->lock); + up_write(&fc->sbput_sem); /* Flush all readers on this fs */ kill_fasync(&fc->fasync, SIGIO, POLL_IN); wake_up_all(&fc->waitq); @@ -395,6 +386,7 @@ static struct fuse_conn *new_conn(void) INIT_LIST_HEAD(&fc->processing); INIT_LIST_HEAD(&fc->io); INIT_LIST_HEAD(&fc->background); + init_rwsem(&fc->sbput_sem); kobj_set_kset_s(fc, connections_subsys); 
kobject_init(&fc->kobj); atomic_set(&fc->num_waiting, 0); @@ -508,11 +500,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) if (file->f_op != &fuse_dev_operations) return -EINVAL; - /* Setting file->private_data can't race with other mount() - instances, since BKL is held for ->get_sb() */ - if (file->private_data) - return -EINVAL; - fc = new_conn(); if (!fc) return -ENOMEM; @@ -548,7 +535,14 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) if (err) goto err_free_req; + /* Setting file->private_data can't race with other mount() + instances, since BKL is held for ->get_sb() */ + err = -EINVAL; + if (file->private_data) + goto err_kobject_del; + sb->s_root = root_dentry; + fc->mounted = 1; fc->connected = 1; kobject_get(&fc->kobj); file->private_data = fc; @@ -563,6 +557,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) return 0; + err_kobject_del: + kobject_del(&fc->kobj); err_free_req: fuse_request_free(init_req); err_put_root: diff --git a/fs/splice.c b/fs/splice.c index 0559e7577a04..447ebc0a37f3 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -27,15 +27,22 @@ #include <linux/buffer_head.h> #include <linux/module.h> #include <linux/syscalls.h> +#include <linux/uio.h> + +struct partial_page { + unsigned int offset; + unsigned int len; +}; /* - * Passed to the actors + * Passed to splice_to_pipe */ -struct splice_desc { - unsigned int len, total_len; /* current and remaining length */ +struct splice_pipe_desc { + struct page **pages; /* page map */ + struct partial_page *partial; /* pages[] may not be contig */ + int nr_pages; /* number of pages in map */ unsigned int flags; /* splice flags */ - struct file *file; /* file to read/write */ - loff_t pos; /* file position */ + struct pipe_buf_operations *ops;/* ops associated with output pipe */ }; /* @@ -128,6 +135,19 @@ static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info, kunmap(buf->page); } +static void *user_page_pipe_buf_map(struct file *file, + struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return kmap(buf->page); +} + +static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + kunmap(buf->page); +} + static void page_cache_pipe_buf_get(struct pipe_inode_info *info, struct pipe_buffer *buf) { @@ -143,19 +163,33 @@ static struct pipe_buf_operations page_cache_pipe_buf_ops = { .get = page_cache_pipe_buf_get, }; +static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return 1; +} + +static struct pipe_buf_operations user_page_pipe_buf_ops = { + .can_merge = 0, + .map = user_page_pipe_buf_map, + .unmap = user_page_pipe_buf_unmap, + .release = page_cache_pipe_buf_release, + .steal = user_page_pipe_buf_steal, + .get = page_cache_pipe_buf_get, +}; + /* * Pipe output worker. This sets up our pipe format with the page cache * pipe buffer operations. Otherwise very similar to the regular pipe_writev(). 
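 *
 * Illustrative sketch only, not part of the patch: callers build the
 * descriptor on the stack, fill pages[]/partial[], set spd.nr_pages to the
 * number of entries filled, and hand it in; __generic_file_splice_read()
 * below does, in essence:
 *
 *	struct page *pages[PIPE_BUFFERS];
 *	struct partial_page partial[PIPE_BUFFERS];
 *	struct splice_pipe_desc spd = {
 *		.pages   = pages,
 *		.partial = partial,
 *		.flags   = flags,
 *		.ops     = &page_cache_pipe_buf_ops,
 *	};
 *
 *	return splice_to_pipe(pipe, &spd);
 *
 * while the new do_vmsplice() does the same with &user_page_pipe_buf_ops.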
*/ -static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages, - int nr_pages, unsigned long len, - unsigned int offset, unsigned int flags) +static ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + struct splice_pipe_desc *spd) { - int ret, do_wakeup, i; + int ret, do_wakeup, page_nr; ret = 0; do_wakeup = 0; - i = 0; + page_nr = 0; if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); @@ -171,27 +205,19 @@ static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages, if (pipe->nrbufs < PIPE_BUFFERS) { int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1); struct pipe_buffer *buf = pipe->bufs + newbuf; - struct page *page = pages[i++]; - unsigned long this_len; - this_len = PAGE_CACHE_SIZE - offset; - if (this_len > len) - this_len = len; - - buf->page = page; - buf->offset = offset; - buf->len = this_len; - buf->ops = &page_cache_pipe_buf_ops; + buf->page = spd->pages[page_nr]; + buf->offset = spd->partial[page_nr].offset; + buf->len = spd->partial[page_nr].len; + buf->ops = spd->ops; pipe->nrbufs++; + page_nr++; + ret += buf->len; + if (pipe->inode) do_wakeup = 1; - ret += this_len; - len -= this_len; - offset = 0; - if (!--nr_pages) - break; - if (!len) + if (!--spd->nr_pages) break; if (pipe->nrbufs < PIPE_BUFFERS) continue; @@ -199,7 +225,7 @@ static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages, break; } - if (flags & SPLICE_F_NONBLOCK) { + if (spd->flags & SPLICE_F_NONBLOCK) { if (!ret) ret = -EAGAIN; break; @@ -234,8 +260,8 @@ static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages, kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } - while (i < nr_pages) - page_cache_release(pages[i++]); + while (page_nr < spd->nr_pages) + page_cache_release(spd->pages[page_nr++]); return ret; } @@ -246,17 +272,24 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, unsigned int flags) { struct address_space *mapping = in->f_mapping; - unsigned int loff, offset, nr_pages; + unsigned int loff, nr_pages; struct page *pages[PIPE_BUFFERS]; + struct partial_page partial[PIPE_BUFFERS]; struct page *page; pgoff_t index, end_index; loff_t isize; - size_t bytes; - int i, error; + size_t total_len; + int error; + struct splice_pipe_desc spd = { + .pages = pages, + .partial = partial, + .flags = flags, + .ops = &page_cache_pipe_buf_ops, + }; index = *ppos >> PAGE_CACHE_SHIFT; - loff = offset = *ppos & ~PAGE_CACHE_MASK; - nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + loff = *ppos & ~PAGE_CACHE_MASK; + nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (nr_pages > PIPE_BUFFERS) nr_pages = PIPE_BUFFERS; @@ -266,15 +299,15 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, * read-ahead if this is a non-zero offset (we are likely doing small * chunk splice and the page is already there) for a single page. 
*/ - if (!offset || nr_pages > 1) - do_page_cache_readahead(mapping, in, index, nr_pages); + if (!loff || spd.nr_pages > 1) + do_page_cache_readahead(mapping, in, index, spd.nr_pages); /* * Now fill in the holes: */ error = 0; - bytes = 0; - for (i = 0; i < nr_pages; i++, index++) { + total_len = 0; + for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) { unsigned int this_len; if (!len) @@ -283,7 +316,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, /* * this_len is the max we'll use from this page */ - this_len = min(len, PAGE_CACHE_SIZE - loff); + this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); find_page: /* * lookup the page for this index @@ -367,26 +400,29 @@ readpage: */ if (end_index == index) { loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK); - if (bytes + loff > isize) { + if (total_len + loff > isize) { page_cache_release(page); break; } /* * force quit after adding this page */ - nr_pages = i; + nr_pages = spd.nr_pages; this_len = min(this_len, loff); + loff = 0; } } fill_it: - pages[i] = page; - bytes += this_len; + pages[spd.nr_pages] = page; + partial[spd.nr_pages].offset = loff; + partial[spd.nr_pages].len = this_len; len -= this_len; + total_len += this_len; loff = 0; } - if (i) - return move_to_pipe(pipe, pages, i, bytes, offset, flags); + if (spd.nr_pages) + return splice_to_pipe(pipe, &spd); return error; } @@ -439,14 +475,13 @@ EXPORT_SYMBOL(generic_file_splice_read); /* * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' - * using sendpage(). + * using sendpage(). Return the number of bytes sent. */ static int pipe_to_sendpage(struct pipe_inode_info *info, struct pipe_buffer *buf, struct splice_desc *sd) { struct file *file = sd->file; loff_t pos = sd->pos; - unsigned int offset; ssize_t ret; void *ptr; int more; @@ -461,16 +496,13 @@ static int pipe_to_sendpage(struct pipe_inode_info *info, if (IS_ERR(ptr)) return PTR_ERR(ptr); - offset = pos & ~PAGE_CACHE_MASK; more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; - ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more); + ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len, + &pos, more); buf->ops->unmap(info, buf); - if (ret == sd->len) - return 0; - - return -EIO; + return ret; } /* @@ -499,7 +531,7 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, struct file *file = sd->file; struct address_space *mapping = file->f_mapping; gfp_t gfp_mask = mapping_gfp_mask(mapping); - unsigned int offset; + unsigned int offset, this_len; struct page *page; pgoff_t index; char *src; @@ -515,6 +547,10 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, index = sd->pos >> PAGE_CACHE_SHIFT; offset = sd->pos & ~PAGE_CACHE_MASK; + this_len = sd->len; + if (this_len + offset > PAGE_CACHE_SIZE) + this_len = PAGE_CACHE_SIZE - offset; + /* * Reuse buf page, if SPLICE_F_MOVE is set. */ @@ -558,7 +594,7 @@ find_page: * the full page. 
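 *
 * (Worked example for illustration, not from the patch: this_len was
 * clamped above so that offset + this_len never exceeds PAGE_CACHE_SIZE;
 * with a 4096-byte page, offset == 3000 and sd->len == 2000 give
 * this_len == 1096, so the prepare_write()/commit_write() range
 * (offset, offset + this_len) used below stays within the page.)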
*/ if (!PageUptodate(page)) { - if (sd->len < PAGE_CACHE_SIZE) { + if (this_len < PAGE_CACHE_SIZE) { ret = mapping->a_ops->readpage(file, page); if (unlikely(ret)) goto out; @@ -582,7 +618,7 @@ find_page: } } - ret = mapping->a_ops->prepare_write(file, page, 0, sd->len); + ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len); if (ret == AOP_TRUNCATED_PAGE) { page_cache_release(page); goto find_page; @@ -592,18 +628,22 @@ find_page: if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) { char *dst = kmap_atomic(page, KM_USER0); - memcpy(dst + offset, src + buf->offset, sd->len); + memcpy(dst + offset, src + buf->offset, this_len); flush_dcache_page(page); kunmap_atomic(dst, KM_USER0); } - ret = mapping->a_ops->commit_write(file, page, 0, sd->len); + ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len); if (ret == AOP_TRUNCATED_PAGE) { page_cache_release(page); goto find_page; } else if (ret) goto out; + /* + * Return the number of bytes written. + */ + ret = this_len; mark_page_accessed(page); balance_dirty_pages_ratelimited(mapping); out: @@ -616,17 +656,14 @@ out_nomem: return ret; } -typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, - struct splice_desc *); - /* * Pipe input worker. Most of this logic works like a regular pipe, the * key here is the 'actor' worker passed in that actually moves the data * to the wanted destination. See pipe_to_file/pipe_to_sendpage above. */ -static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out, - loff_t *ppos, size_t len, unsigned int flags, - splice_actor *actor) +ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags, + splice_actor *actor) { int ret, do_wakeup, err; struct splice_desc sd; @@ -652,16 +689,22 @@ static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out, sd.len = sd.total_len; err = actor(pipe, buf, &sd); - if (err) { + if (err <= 0) { if (!ret && err != -ENODATA) ret = err; break; } - ret += sd.len; - buf->offset += sd.len; - buf->len -= sd.len; + ret += err; + buf->offset += err; + buf->len -= err; + + sd.len -= err; + sd.pos += err; + sd.total_len -= err; + if (sd.len) + continue; if (!buf->len) { buf->ops = NULL; @@ -672,8 +715,6 @@ static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out, do_wakeup = 1; } - sd.pos += sd.len; - sd.total_len -= sd.len; if (!sd.total_len) break; } @@ -741,7 +782,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, struct address_space *mapping = out->f_mapping; ssize_t ret; - ret = move_from_pipe(pipe, out, ppos, len, flags, pipe_to_file); + ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file); if (ret > 0) { struct inode *inode = mapping->host; @@ -783,7 +824,7 @@ EXPORT_SYMBOL(generic_file_splice_write); ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { - return move_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); + return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); } EXPORT_SYMBOL(generic_splice_sendpage); @@ -870,7 +911,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, /* * We don't have an immediate reader, but we'll read the stuff - * out of the pipe right after the move_to_pipe(). So set + * out of the pipe right after the splice_to_pipe(). So set * PIPE_READERS appropriately. 
*/ pipe->readers = 1; @@ -1010,6 +1051,174 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; } +/* + * Map an iov into an array of pages and offset/length tupples. With the + * partial_page structure, we can map several non-contiguous ranges into + * our ones pages[] map instead of splitting that operation into pieces. + * Could easily be exported as a generic helper for other users, in which + * case one would probably want to add a 'max_nr_pages' parameter as well. + */ +static int get_iovec_page_array(const struct iovec __user *iov, + unsigned int nr_vecs, struct page **pages, + struct partial_page *partial) +{ + int buffers = 0, error = 0; + + /* + * It's ok to take the mmap_sem for reading, even + * across a "get_user()". + */ + down_read(¤t->mm->mmap_sem); + + while (nr_vecs) { + unsigned long off, npages; + void __user *base; + size_t len; + int i; + + /* + * Get user address base and length for this iovec. + */ + error = get_user(base, &iov->iov_base); + if (unlikely(error)) + break; + error = get_user(len, &iov->iov_len); + if (unlikely(error)) + break; + + /* + * Sanity check this iovec. 0 read succeeds. + */ + if (unlikely(!len)) + break; + error = -EFAULT; + if (unlikely(!base)) + break; + + /* + * Get this base offset and number of pages, then map + * in the user pages. + */ + off = (unsigned long) base & ~PAGE_MASK; + npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (npages > PIPE_BUFFERS - buffers) + npages = PIPE_BUFFERS - buffers; + + error = get_user_pages(current, current->mm, + (unsigned long) base, npages, 0, 0, + &pages[buffers], NULL); + + if (unlikely(error <= 0)) + break; + + /* + * Fill this contiguous range into the partial page map. + */ + for (i = 0; i < error; i++) { + const int plen = min_t(size_t, len, PAGE_SIZE) - off; + + partial[buffers].offset = off; + partial[buffers].len = plen; + + off = 0; + len -= plen; + buffers++; + } + + /* + * We didn't complete this iov, stop here since it probably + * means we have to move some of this into a pipe to + * be able to continue. + */ + if (len) + break; + + /* + * Don't continue if we mapped fewer pages than we asked for, + * or if we mapped the max number of pages that we have + * room for. + */ + if (error < npages || buffers == PIPE_BUFFERS) + break; + + nr_vecs--; + iov++; + } + + up_read(¤t->mm->mmap_sem); + + if (buffers) + return buffers; + + return error; +} + +/* + * vmsplice splices a user address range into a pipe. It can be thought of + * as splice-from-memory, where the regular splice is splice-from-file (or + * to file). In both cases the output is a pipe, naturally. + * + * Note that vmsplice only supports splicing _from_ user memory to a pipe, + * not the other way around. Splicing from user memory is a simple operation + * that can be supported without any funky alignment restrictions or nasty + * vm tricks. We simply map in the user memory and fill them into a pipe. + * The reverse isn't quite as easy, though. There are two possible solutions + * for that: + * + * - memcpy() the data internally, at which point we might as well just + * do a regular read() on the buffer anyway. + * - Lots of nasty vm tricks, that are neither fast nor flexible (it + * has restriction limitations on both ends of the pipe). + * + * Alas, it isn't here. 
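 *
 * Illustrative userspace sketch, not part of the patch; it assumes the
 * installed headers already carry the new __NR_vmsplice number from the
 * unistd.h updates below and that libc has no wrapper yet, so the syscall
 * is invoked directly:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static char buf[] = "user memory into a pipe\n";
 *		struct iovec iov = {
 *			.iov_base = buf,
 *			.iov_len  = sizeof(buf) - 1,
 *		};
 *		int pfd[2];
 *
 *		if (pipe(pfd))
 *			return 1;
 *		if (syscall(__NR_vmsplice, pfd[1], &iov, 1UL, 0U) < 0)
 *			return 1;
 *		return 0;
 *	}
 *
 * Only the write end of a pipe is accepted (the FMODE_WRITE check in
 * sys_vmsplice() below); the data can then be read from pfd[0] or spliced
 * onwards with splice().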
+ * + */ +static long do_vmsplice(struct file *file, const struct iovec __user *iov, + unsigned long nr_segs, unsigned int flags) +{ + struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe; + struct page *pages[PIPE_BUFFERS]; + struct partial_page partial[PIPE_BUFFERS]; + struct splice_pipe_desc spd = { + .pages = pages, + .partial = partial, + .flags = flags, + .ops = &user_page_pipe_buf_ops, + }; + + if (unlikely(!pipe)) + return -EBADF; + if (unlikely(nr_segs > UIO_MAXIOV)) + return -EINVAL; + else if (unlikely(!nr_segs)) + return 0; + + spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial); + if (spd.nr_pages <= 0) + return spd.nr_pages; + + return splice_to_pipe(pipe, &spd); +} + +asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, + unsigned long nr_segs, unsigned int flags) +{ + struct file *file; + long error; + int fput; + + error = -EBADF; + file = fget_light(fd, &fput); + if (file) { + if (file->f_mode & FMODE_WRITE) + error = do_vmsplice(file, iov, nr_segs, flags); + + fput_light(file, fput); + } + + return error; +} + asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, int fd_out, loff_t __user *off_out, size_t len, unsigned int flags) diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index d81d6cfc1bb4..eb4b152c82fc 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -321,8 +321,9 @@ #define __NR_splice 313 #define __NR_sync_file_range 314 #define __NR_tee 315 +#define __NR_vmsplice 316 -#define NR_syscalls 316 +#define NR_syscalls 317 /* * user-visible error numbers are in the range -1 - -128: see diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index a40ebec6aeeb..7107763168bf 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -290,12 +290,13 @@ #define __NR_get_robust_list 1299 #define __NR_sync_file_range 1300 #define __NR_tee 1301 +#define __NR_vmsplice 1302 #ifdef __KERNEL__ #include <linux/config.h> -#define NR_syscalls 278 /* length of syscall table */ +#define NR_syscalls 279 /* length of syscall table */ #define __ARCH_WANT_SYS_RT_SIGACTION diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h index c612f1a62772..34325e292596 100644 --- a/include/asm-powerpc/unistd.h +++ b/include/asm-powerpc/unistd.h @@ -303,8 +303,9 @@ #define __NR_unshare 282 #define __NR_splice 283 #define __NR_tee 284 +#define __NR_vmsplice 285 -#define __NR_syscalls 285 +#define __NR_syscalls 286 #ifdef __KERNEL__ #define __NR__exit __NR_exit diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h index 45feff893b8e..32a48f623e2b 100644 --- a/include/asm-sparc/unistd.h +++ b/include/asm-sparc/unistd.h @@ -271,7 +271,7 @@ #define __NR_getsid 252 #define __NR_fdatasync 253 #define __NR_nfsservctl 254 -#define __NR_sys_sync_file_range 255 +#define __NR_sync_file_range 255 #define __NR_clock_settime 256 #define __NR_clock_gettime 257 #define __NR_clock_getres 258 diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index 597f6923a46e..ca80e8aca128 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h @@ -273,7 +273,7 @@ #define __NR_getsid 252 #define __NR_fdatasync 253 #define __NR_nfsservctl 254 -#define __NR_sys_sync_file_range 255 +#define __NR_sync_file_range 255 #define __NR_clock_settime 256 #define __NR_clock_gettime 257 #define __NR_clock_getres 258 diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 98c36eae567c..feb77cb8c044 100644 --- 
a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -615,8 +615,10 @@ __SYSCALL(__NR_splice, sys_splice) __SYSCALL(__NR_tee, sys_tee) #define __NR_sync_file_range 277 __SYSCALL(__NR_sync_file_range, sys_sync_file_range) +#define __NR_vmsplice 278 +__SYSCALL(__NR_vmsplice, sys_vmsplice) -#define __NR_syscall_max __NR_sync_file_range +#define __NR_syscall_max __NR_vmsplice #ifndef __NO_STUBS diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 40ccf8cc4239..01db7b88a2b1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -829,19 +829,21 @@ static inline void netif_rx_schedule(struct net_device *dev) __netif_rx_schedule(dev); } -/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). - * Do not inline this? - */ + +static inline void __netif_rx_reschedule(struct net_device *dev, int undo) +{ + dev->quota += undo; + list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); + __raise_softirq_irqoff(NET_RX_SOFTIRQ); +} + +/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ static inline int netif_rx_reschedule(struct net_device *dev, int undo) { if (netif_rx_schedule_prep(dev)) { unsigned long flags; - - dev->quota += undo; - local_irq_save(flags); - list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + __netif_rx_reschedule(dev, undo); local_irq_restore(flags); return 1; } diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index f6bdef82a322..38701454e197 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -361,7 +361,11 @@ struct compat_xt_entry_target struct compat_xt_counters { +#if defined(CONFIG_X86_64) || defined(CONFIG_IA64) u_int32_t cnt[4]; +#else + u_int64_t cnt[2]; +#endif }; struct compat_xt_counters_info diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index ef7f33c0be19..0008d4bd4059 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -61,4 +61,21 @@ void __free_pipe_info(struct pipe_inode_info *); /* from/to, of course */ #define SPLICE_F_MORE (0x04) /* expect more data */ +/* + * Passed to the actors + */ +struct splice_desc { + unsigned int len, total_len; /* current and remaining length */ + unsigned int flags; /* splice flags */ + struct file *file; /* file to read/write */ + loff_t pos; /* file position */ +}; + +typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, + struct splice_desc *); + +extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, + loff_t *, size_t, unsigned int, + splice_actor *); + #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index d3ebc0e68b2b..3996960fc565 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -574,6 +574,9 @@ asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, int fd_out, loff_t __user *off_out, size_t len, unsigned int flags); +asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, + unsigned long nr_segs, unsigned int flags); + asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags); asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h index 6b3693f05ca0..b1ebfbae397f 100644 --- a/include/net/ieee80211softmac.h +++ b/include/net/ieee80211softmac.h @@ -96,10 +96,13 @@ struct ieee80211softmac_assoc_info { * * bssvalid is true 
if we found a matching network * and saved it's BSSID into the bssid above. + * + * bssfixed is used for SIOCSIWAP. */ u8 static_essid:1, associating:1, - bssvalid:1; + bssvalid:1, + bssfixed:1; /* Scan retries remaining */ int scan_retry; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index d2a7296c8251..b7f0388bd71c 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -836,7 +836,7 @@ static void migrate_hrtimers(int cpu) } #endif /* CONFIG_HOTPLUG_CPU */ -static int __devinit hrtimer_cpu_notify(struct notifier_block *self, +static int hrtimer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; @@ -860,7 +860,7 @@ static int __devinit hrtimer_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata hrtimers_nb = { +static struct notifier_block hrtimers_nb = { .notifier_call = hrtimer_cpu_notify, }; diff --git a/kernel/profile.c b/kernel/profile.c index 5a730fdb1a2c..68afe121e507 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -299,7 +299,7 @@ out: } #ifdef CONFIG_HOTPLUG_CPU -static int __devinit profile_cpu_callback(struct notifier_block *info, +static int profile_cpu_callback(struct notifier_block *info, unsigned long action, void *__cpu) { int node, cpu = (unsigned long)__cpu; diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 13458bbaa1be..6d32ff26f948 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -520,7 +520,7 @@ static void __devinit rcu_online_cpu(int cpu) tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); } -static int __devinit rcu_cpu_notify(struct notifier_block *self, +static int rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; @@ -537,7 +537,7 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata rcu_nb = { +static struct notifier_block rcu_nb = { .notifier_call = rcu_cpu_notify, }; diff --git a/kernel/sched.c b/kernel/sched.c index 365f0b90b4de..4c64f85698ae 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4814,7 +4814,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, /* Register at highest priority so that task migration (migrate_all_tasks) * happens before everything else. 
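 *
 * (Annotation, not part of the patch: notifier chains are kept sorted by
 * descending ->priority and invoked in that order, so the priority of 10
 * below keeps migration_call() running ahead of the default priority-0
 * CPU notifiers touched elsewhere in this patch.)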
*/ -static struct notifier_block __devinitdata migration_notifier = { +static struct notifier_block migration_notifier = { .notifier_call = migration_call, .priority = 10 }; diff --git a/kernel/softirq.c b/kernel/softirq.c index ec8fed42a86f..336f92d64e2e 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu) } #endif /* CONFIG_HOTPLUG_CPU */ -static int __devinit cpu_callback(struct notifier_block *nfb, +static int cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -484,7 +484,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block __devinitdata cpu_nfb = { +static struct notifier_block cpu_nfb = { .notifier_call = cpu_callback }; diff --git a/kernel/softlockup.c b/kernel/softlockup.c index ced91e1ff564..14c7faf02909 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu) /* * Create/destroy watchdog threads as CPUs come and go: */ -static int __devinit +static int cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; @@ -140,7 +140,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) return NOTIFY_OK; } -static struct notifier_block __devinitdata cpu_nfb = { +static struct notifier_block cpu_nfb = { .notifier_call = cpu_callback }; diff --git a/kernel/timer.c b/kernel/timer.c index 883773788836..67eaf0f54096 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1314,7 +1314,7 @@ static void __devinit migrate_timers(int cpu) } #endif /* CONFIG_HOTPLUG_CPU */ -static int __devinit timer_cpu_notify(struct notifier_block *self, +static int timer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; @@ -1334,7 +1334,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata timers_nb = { +static struct notifier_block timers_nb = { .notifier_call = timer_cpu_notify, }; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e9e464a90376..880fb415a8f6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -547,7 +547,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu) } /* We're holding the cpucontrol mutex here */ -static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, +static int workqueue_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 123c60586740..ea77c999047e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1962,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu) } } -static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, +static int pageset_cpuup_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { diff --git a/mm/slab.c b/mm/slab.c index e6ef9bd52335..af5c5237e11a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1036,7 +1036,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr) #endif -static int __devinit cpuup_callback(struct notifier_block *nfb, +static int cpuup_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { long cpu = (long)hcpu; diff --git a/mm/vmscan.c b/mm/vmscan.c index acdf001d6941..4649a63a8cb6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1328,7 +1328,7 @@ repeat: not required for correctness. 
So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. */ -static int __devinit cpu_callback(struct notifier_block *nfb, +static int cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { pg_data_t *pgdat; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 2d24fb400e0c..56f3aa47e758 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <linux/if_vlan.h> #include <linux/netfilter_bridge.h> #include "br_private.h" @@ -29,10 +30,15 @@ static inline int should_deliver(const struct net_bridge_port *p, return 1; } +static inline unsigned packet_length(const struct sk_buff *skb) +{ + return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0); +} + int br_dev_queue_push_xmit(struct sk_buff *skb) { /* drop mtu oversized packets except tso */ - if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size) + if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->tso_size) kfree_skb(skb); else { #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index 4498023841dc..fb79ce7d6439 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c @@ -144,6 +144,12 @@ network_matches_request(struct ieee80211softmac_device *mac, struct ieee80211_ne if (!we_support_all_basic_rates(mac, net->rates_ex, net->rates_ex_len)) return 0; + /* assume that users know what they're doing ... + * (note we don't let them select a net we're incompatible with) */ + if (mac->associnfo.bssfixed) { + return !memcmp(mac->associnfo.bssid, net->bssid, ETH_ALEN); + } + /* if 'ANY' network requested, take any that doesn't have privacy enabled */ if (mac->associnfo.req_essid.len == 0 && !(net->capability & WLAN_CAPABILITY_PRIVACY)) @@ -176,7 +182,7 @@ ieee80211softmac_assoc_work(void *d) ieee80211softmac_disassoc(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT); /* try to find the requested network in our list, if we found one already */ - if (mac->associnfo.bssvalid) + if (mac->associnfo.bssvalid || mac->associnfo.bssfixed) found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); /* Search the ieee80211 networks for this network if we didn't find it by bssid, @@ -241,19 +247,25 @@ ieee80211softmac_assoc_work(void *d) if (ieee80211softmac_start_scan(mac)) dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); return; - } - else { + } else { spin_lock_irqsave(&mac->lock, flags); mac->associnfo.associating = 0; mac->associated = 0; spin_unlock_irqrestore(&mac->lock, flags); dprintk(KERN_INFO PFX "Unable to find matching network after scan!\n"); + /* reset the retry counter for the next user request since we + * break out and don't reschedule ourselves after this point. */ + mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_NET_NOT_FOUND, NULL); return; } } - + + /* reset the retry counter for the next user request since we + * now found a net and will try to associate to it, but not + * schedule this function again. 
*/ + mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; mac->associnfo.bssvalid = 1; memcpy(mac->associnfo.bssid, found->bssid, ETH_ALEN); /* copy the ESSID for displaying it */ diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c index 60f06a31f0d1..be83bdc1644a 100644 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/net/ieee80211/softmac/ieee80211softmac_module.c @@ -45,6 +45,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv) softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc; softmac->scaninfo = NULL; + softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; + /* TODO: initialise all the other callbacks in the ieee struct * (once they're written) */ diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 00f0d4f71897..27edb2b5581a 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c @@ -27,7 +27,8 @@ #include "ieee80211softmac_priv.h" #include <net/iw_handler.h> - +/* for is_broadcast_ether_addr and is_zero_ether_addr */ +#include <linux/etherdevice.h> int ieee80211softmac_wx_trigger_scan(struct net_device *net_dev, @@ -83,7 +84,6 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, sm->associnfo.static_essid = 1; } } - sm->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; /* set our requested ESSID length. * If applicable, we have already copied the data in */ @@ -310,8 +310,6 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, char *extra) { struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); - static const unsigned char any[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - static const unsigned char off[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; unsigned long flags; /* sanity check */ @@ -320,10 +318,17 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, } spin_lock_irqsave(&mac->lock, flags); - if (!memcmp(any, data->ap_addr.sa_data, ETH_ALEN) || - !memcmp(off, data->ap_addr.sa_data, ETH_ALEN)) { - schedule_work(&mac->associnfo.work); - goto out; + if (is_broadcast_ether_addr(data->ap_addr.sa_data)) { + /* the bssid we have is not to be fixed any longer, + * and we should reassociate to the best AP. */ + mac->associnfo.bssfixed = 0; + /* force reassociation */ + mac->associnfo.bssvalid = 0; + if (mac->associated) + schedule_work(&mac->associnfo.work); + } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { + /* the bssid we have is no longer fixed */ + mac->associnfo.bssfixed = 0; } else { if (!memcmp(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN)) { if (mac->associnfo.associating || mac->associated) { @@ -333,12 +338,14 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, } else { /* copy new value in data->ap_addr.sa_data to bssid */ memcpy(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN); - } + } + /* tell the other code that this bssid should be used no matter what */ + mac->associnfo.bssfixed = 1; /* queue associate if new bssid or (old one again and not associated) */ schedule_work(&mac->associnfo.work); } -out: + out: spin_unlock_irqrestore(&mac->lock, flags); return 0; } diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index c60fd5c4ea1e..3d560dec63ab 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -345,7 +345,7 @@ config IP_NF_TARGET_LOG To compile it as a module, choose M here. If unsure, say N. 
config IP_NF_TARGET_ULOG - tristate "ULOG target support (OBSOLETE)" + tristate "ULOG target support" depends on IP_NF_IPTABLES ---help--- diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 642b4b11464f..0a673038344f 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -288,19 +288,6 @@ ip6t_do_table(struct sk_buff **pskb, table_base = (void *)private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); -#ifdef CONFIG_NETFILTER_DEBUG - /* Check noone else using our table */ - if (((struct ip6t_entry *)table_base)->comefrom != 0xdead57ac - && ((struct ip6t_entry *)table_base)->comefrom != 0xeeeeeeec) { - printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n", - smp_processor_id(), - table->name, - &((struct ip6t_entry *)table_base)->comefrom, - ((struct ip6t_entry *)table_base)->comefrom); - } - ((struct ip6t_entry *)table_base)->comefrom = 0x57acc001; -#endif - /* For return from builtin chain */ back = get_entry(table_base, private->underflow[hook]); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index e581190fb6c3..f9b83f91371a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -178,9 +178,6 @@ static struct { /* allocated slab cache + modules which uses this slab cache */ int use; - /* Initialization */ - int (*init_conntrack)(struct nf_conn *, u_int32_t); - } nf_ct_cache[NF_CT_F_NUM]; /* protect members of nf_ct_cache except of "use" */ @@ -208,10 +205,8 @@ nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol) preempt_disable(); p = __nf_ct_proto_find(l3proto, protocol); - if (p) { - if (!try_module_get(p->me)) - p = &nf_conntrack_generic_protocol; - } + if (!try_module_get(p->me)) + p = &nf_conntrack_generic_protocol; preempt_enable(); return p; @@ -229,10 +224,8 @@ nf_ct_l3proto_find_get(u_int16_t l3proto) preempt_disable(); p = __nf_ct_l3proto_find(l3proto); - if (p) { - if (!try_module_get(p->me)) - p = &nf_conntrack_generic_l3proto; - } + if (!try_module_get(p->me)) + p = &nf_conntrack_generic_l3proto; preempt_enable(); return p; diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c index 7de4f06c63c5..3fc58e454d4e 100644 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ b/net/netfilter/nf_conntrack_l3proto_generic.c @@ -94,5 +94,4 @@ struct nf_conntrack_l3proto nf_conntrack_generic_l3proto = { .print_conntrack = generic_print_conntrack, .prepare = generic_prepare, .get_features = generic_get_features, - .me = THIS_MODULE, }; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 00cf0a4f4d92..17abf60f9570 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -529,6 +529,7 @@ int xt_register_table(struct xt_table *table, /* Simplifies replace_table code. 
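 *
 * (Annotation, not part of the patch: the change below moves rwlock_init()
 * ahead of the xt_replace_table() call, which takes table->lock for
 * writing, so the lock is now initialised before its first use rather
 * than afterwards.)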
*/ table->private = bootstrap; + rwlock_init(&table->lock); if (!xt_replace_table(table, 0, newinfo, &ret)) goto unlock; @@ -538,7 +539,6 @@ int xt_register_table(struct xt_table *table, /* save number of initial entries */ private->initial_entries = private->number; - rwlock_init(&table->lock); list_prepend(&xt[table->af].tables, table); ret = 0; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 6056d20ef429..37640c6fc014 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -69,6 +69,11 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook) DPRINTK("ipt_init_target: found %s\n", target->name); t->u.kernel.target = target; + ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t), + table, hook, 0, 0); + if (ret) + return ret; + if (t->u.kernel.target->checkentry && !t->u.kernel.target->checkentry(table, NULL, t->u.kernel.target, t->data, |