121 files changed, 1063 insertions, 617 deletions
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 8fbd8b46ee34..dcf338e62b71 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -175,9 +175,9 @@ consists of multiple segments as described below.
                                             align with the zone size <-|
                  |-> align with the segment size
     _________________________________________________________________________
-    |            |            |    Node     |   Segment   |   Segment  |      |
-    | Superblock | Checkpoint |   Address   |    Info.    |   Summary  | Main |
-    |    (SB)    |   (CP)     | Table (NAT) | Table (SIT) | Area (SSA) |      |
+    |            |            |   Segment   |    Node     |   Segment  |      |
+    | Superblock | Checkpoint |    Info.    |   Address   |   Summary  | Main |
+    |    (SB)    |   (CP)     | Table (SIT) | Table (NAT) | Area (SSA) |      |
     |____________|_____2______|______N______|______N______|______N_____|__N___|
                                                                        .      .
                                                              .                .
@@ -200,14 +200,14 @@ consists of multiple segments as described below.
  : It contains file system information, bitmaps for valid NAT/SIT sets, orphan
    inode lists, and summary entries of current active segments.
 
-- Node Address Table (NAT)
- : It is composed of a block address table for all the node blocks stored in
-   Main area.
-
 - Segment Information Table (SIT)
  : It contains segment information such as valid block count and bitmap for
    the validity of all the blocks.
 
+- Node Address Table (NAT)
+ : It is composed of a block address table for all the node blocks stored in
+   Main area.
+
 - Segment Summary Area (SSA)
  : It contains summary entries which contains the owner information of all the
    data and node blocks stored in Main area.
@@ -236,13 +236,13 @@ For file system consistency, each CP points to which NAT and SIT copies are
 valid, as shown as below.
 
   +--------+----------+---------+
-  |   CP   |    NAT   |   SIT   |
+  |   CP   |    SIT   |   NAT   |
   +--------+----------+---------+
  .          .          .          .
  .            .            .            .
  .              .              .              .
  +-------+-------+--------+--------+--------+--------+
-| CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 |
+| CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 |
 +-------+-------+--------+--------+--------+--------+
 |       ^                                  ^
 |       |                                  |
diff --git a/MAINTAINERS b/MAINTAINERS
index 3105c4868c4e..4e734ed187e4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6585,9 +6585,7 @@ F:	drivers/media/platform/s3c-camif/
 F:	include/media/s3c_camif.h
 
 SERIAL DRIVERS
-M:	Alan Cox <alan@linux.intel.com>
 L:	linux-serial@vger.kernel.org
-S:	Maintained
 F:	drivers/tty/serial
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 4265ff64219b..b7a5fffe0924 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
 	read_unlock(&tasklist_lock);
 }
 
-static inline int
-thread_matches (struct task_struct *thread, unsigned long addr)
-{
-	unsigned long thread_rbs_end;
-	struct pt_regs *thread_regs;
-
-	if (ptrace_check_attach(thread, 0) < 0)
-		/*
-		 * If the thread is not in an attachable state, we'll
-		 * ignore it. The net effect is that if ADDR happens
-		 * to overlap with the portion of the thread's
-		 * register backing store that is currently residing
-		 * on the thread's kernel stack, then ptrace() may end
-		 * up accessing a stale value. But if the thread
-		 * isn't stopped, that's a problem anyhow, so we're
-		 * doing as well as we can...
-		 */
-		return 0;
-
-	thread_regs = task_pt_regs(thread);
-	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
-	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
-		return 0;
-
-	return 1;	/* looks like we've got a winner */
-}
-
 /*
  * Write f32-f127 back to task->thread.fph if it has been modified.
  */
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index bf86b29fe64a..037028f4ab70 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -64,6 +64,8 @@ extern unsigned int kobjsize(const void *objp);
  */
 #define	VMALLOC_START	0
 #define	VMALLOC_END	0xffffffff
+#define	KMAP_START	0
+#define	KMAP_END	0xffffffff
 
 #include <asm-generic/pgtable.h>
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index f0e05bce92f2..afd8106fd83b 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -39,6 +39,11 @@ void *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+extern void init_pointer_table(unsigned long ptable);
+extern pmd_t *zero_pgtable;
+#endif
+
 #ifdef CONFIG_MMU
 
 pg_data_t pg_data_map[MAX_NUMNODES];
@@ -69,9 +74,6 @@ void __init m68k_setup_node(int node)
 	node_set_online(node);
 }
 
-extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
-
 #else /* CONFIG_MMU */
 
 /*
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index bfb44247d7a7..eb7850b46c25 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1865,7 +1865,7 @@ syscall_restore:
 
 	/* Are we being ptraced? */
 	ldw	TASK_FLAGS(%r1),%r19
-	ldi	(_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
 	and,COND(=)	%r19,%r2,%r0
 	b,n	syscall_restore_rfi
 
@@ -1978,15 +1978,23 @@ syscall_restore_rfi:
 	/* sr2 should be set to zero for userspace syscalls */
 	STREG	%r0,TASK_PT_SR2(%r1)
 
-pt_regs_ok:
 	LDREG	TASK_PT_GR31(%r1),%r2
-	depi	3,31,2,%r2	/* ensure return to user mode. */
-	STREG	%r2,TASK_PT_IAOQ0(%r1)
+	depi	3,31,2,%r2	/* ensure return to user mode. */
+	STREG	%r2,TASK_PT_IAOQ0(%r1)
 	ldo	4(%r2),%r2
 	STREG	%r2,TASK_PT_IAOQ1(%r1)
+	b	intr_restore
 	copy	%r25,%r16
+
+pt_regs_ok:
+	LDREG	TASK_PT_IAOQ0(%r1),%r2
+	depi	3,31,2,%r2	/* ensure return to user mode. */
+	STREG	%r2,TASK_PT_IAOQ0(%r1)
+	LDREG	TASK_PT_IAOQ1(%r1),%r2
+	depi	3,31,2,%r2
+	STREG	%r2,TASK_PT_IAOQ1(%r1)
 	b	intr_restore
-	nop
+	copy	%r25,%r16
 
 	.import schedule,code
 syscall_do_resched:
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index c0b1affc06a8..0299d63cd112 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -410,11 +410,13 @@ void __init init_IRQ(void)
 {
 	local_irq_disable();	/* PARANOID - should already be disabled */
 	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
-	claim_cpu_irqs();
 #ifdef CONFIG_SMP
-	if (!cpu_eiem)
+	if (!cpu_eiem) {
+		claim_cpu_irqs();
 		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+	}
 #else
+	claim_cpu_irqs();
 	cpu_eiem = EIEM_MASK(TIMER_IRQ);
 #endif
 	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 857c2f545470..534abd4936e1 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -26,7 +26,7 @@
 #include <asm/asm-offsets.h>
 
 /* PSW bits we allow the debugger to modify */
-#define USER_PSW_BITS	(PSW_N | PSW_V | PSW_CB)
+#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)
 
 /*
  * Called by kernel/ptrace.c when detaching..
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 537996955998..fd051705a407 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -190,8 +190,10 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", (unsigned long)ka, sp, frame_size); + /* Align alternate stack and reserve 64 bytes for the signal + handler's frame marker. */ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) - sp = current->sas_ss_sp; /* Stacks grow up! */ + sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); return (void __user *) sp; /* Stacks grow up. Fun. */ diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h index 9071e093164a..933423fa5144 100644 --- a/arch/parisc/math-emu/cnv_float.h +++ b/arch/parisc/math-emu/cnv_float.h @@ -347,16 +347,15 @@ Sgl_isinexact_to_fix(sgl_value,exponent) #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \ - {Sall(sgl_value) <<= SGL_EXP_LENGTH; /* left-justify */ \ + {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH; \ if (exponent <= 31) { \ - Dintp1(dresultA) = 0; \ - Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \ + Dintp1(dresultA) = 0; \ + Dintp2(dresultB) = val >> (31 - exponent); \ } \ else { \ - Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent); \ - Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31); \ + Dintp1(dresultA) = val >> (63 - exponent); \ + Dintp2(dresultB) = exponent <= 62 ? val << (exponent - 31) : 0; \ } \ - Sall(sgl_value) >>= SGL_EXP_LENGTH; /* return to original */ \ } #define Duint_setzero(dresultA,dresultB) \ diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 4428fd178bce..6774c17a5576 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -340,9 +340,6 @@ int x86_setup_perfctr(struct perf_event *event) /* BTS is currently only allowed for user-mode. */ if (!attr->exclude_kernel) return -EOPNOTSUPP; - - if (!attr->exclude_guest) - return -EOPNOTSUPP; } hwc->config |= config; @@ -385,9 +382,6 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.precise_ip) { int precise = 0; - if (!event->attr.exclude_guest) - return -EOPNOTSUPP; - /* Support for constant skid */ if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { precise++; diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index ff84d5469d77..6ed91d9980e2 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1065,7 +1065,6 @@ ENTRY(xen_failsafe_callback) lea 16(%esp),%esp CFI_ADJUST_CFA_OFFSET -16 jz 5f - addl $16,%esp jmp iret_exc 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index cd3b2438a980..9b4d51d0c0d0 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -165,10 +165,11 @@ void set_task_blockstep(struct task_struct *task, bool on) * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. - * FIXME: this means that set/clear TIF_BLOCKSTEP is simply - * wrong if task != current, SIGKILL can wakeup the stopped - * tracee and set/clear can play with the running task, this - * can confuse the next __switch_to_xtra(). 
+ * + * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if + * task is current or it can't be running, otherwise we can race + * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but + * PTRACE_KILL is not safe. */ local_irq_disable(); debugctl = get_debugctlmsr(); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 4f7d2599b484..34bc4cee8887 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -432,13 +432,6 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ play_dead_common(); HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); cpu_bringup(); - /* - * Balance out the preempt calls - as we are running in cpu_idle - * loop which has been called at bootup from cpu_bringup_and_idle. - * The cpucpu_bringup_and_idle called cpu_bringup which made a - * preempt_disable() So this preempt_enable will balance it out. - */ - preempt_enable(); } #else /* !CONFIG_HOTPLUG_CPU */ diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 7862d17976b7..497912732566 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -53,6 +53,7 @@ enum { AHCI_PCI_BAR_STA2X11 = 0, + AHCI_PCI_BAR_ENMOTUS = 2, AHCI_PCI_BAR_STANDARD = 5, }; @@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + /* Enmotus */ + { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, + /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, @@ -1098,9 +1102,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "PDC42819 can only drive SATA devices with this driver\n"); - /* The Connext uses non-standard BAR */ + /* Both Connext and Enmotus devices use non-standard BARs */ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06) ahci_pci_bar = AHCI_PCI_BAR_STA2X11; + else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) + ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; /* acquire resources */ rc = pcim_enable_device(pdev); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 320712a7b9ea..6cd7805e47ca 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1951,13 +1951,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) /* Use the nominal value 10 ms if the read MDAT is zero, * the nominal value of DETO is 20 ms. */ - if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] & + if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] & ATA_LOG_DEVSLP_VALID_MASK) { - mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] & + mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] & ATA_LOG_DEVSLP_MDAT_MASK; if (!mdat) mdat = 10; - deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO]; + deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO]; if (!deto) deto = 20; } else { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 9e8b99af400d..46cd3f4c6aaa 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev) } } - /* check and mark DevSlp capability */ - if (ata_id_has_devslp(dev->id)) - dev->flags |= ATA_DFLAG_DEVSLP; - - /* Obtain SATA Settings page from Identify Device Data Log, - * which contains DevSlp timing variables etc. - * Exclude old devices with ata_id_has_ncq() + /* Check and mark DevSlp capability. Get DevSlp timing variables + * from SATA Settings page of Identify Device Data Log. 
*/ - if (ata_id_has_ncq(dev->id)) { + if (ata_id_has_devslp(dev->id)) { + u8 sata_setting[ATA_SECT_SIZE]; + int i, j; + + dev->flags |= ATA_DFLAG_DEVSLP; err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, ATA_LOG_SATA_SETTINGS, - dev->sata_settings, + sata_setting, 1); if (err_mask) ata_dev_dbg(dev, "failed to get Identify Device Data, Emask 0x%x\n", err_mask); + else + for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { + j = ATA_LOG_DEVSLP_OFFSET + i; + dev->devslp_timing[i] = sata_setting[j]; + } } dev->cdb_len = 16; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index bf039b0e97b7..bcf4437214f5 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, */ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) { - if (qc->flags & AC_ERR_MEDIA) + if (qc->err_mask & AC_ERR_MEDIA) return 0; /* don't retry media errors */ if (qc->flags & ATA_QCFLAG_IO) return 1; /* otherwise retry anything from fs stack */ diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 9d8409c02082..8ad21a25bc0d 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -889,6 +889,7 @@ static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; int index = vblk->index; + int refc; /* Prevent config work handler from accessing the device. */ mutex_lock(&vblk->config_lock); @@ -903,11 +904,15 @@ static void virtblk_remove(struct virtio_device *vdev) flush_work(&vblk->config_work); + refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); put_disk(vblk->disk); mempool_destroy(vblk->pool); vdev->config->del_vqs(vdev); kfree(vblk); - ida_simple_remove(&vd_index_ida, index); + + /* Only free device id if we don't have any users */ + if (refc == 1) + ida_simple_remove(&vd_index_ida, index); } #ifdef CONFIG_PM diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 7d9bd94be8d2..6819d63cb167 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -547,7 +547,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip->membase = devm_request_and_ioremap(&pdev->dev, res); if (! mvchip->membase) { dev_err(&pdev->dev, "Cannot ioremap\n"); - kfree(mvchip->chip.label); return -ENOMEM; } @@ -557,14 +556,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (! res) { dev_err(&pdev->dev, "Cannot get memory resource\n"); - kfree(mvchip->chip.label); return -ENODEV; } mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res); if (! mvchip->percpu_membase) { dev_err(&pdev->dev, "Cannot ioremap\n"); - kfree(mvchip->chip.label); return -ENOMEM; } } @@ -625,7 +622,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); if (mvchip->irqbase < 0) { dev_err(&pdev->dev, "no irqs\n"); - kfree(mvchip->chip.label); return -ENOMEM; } @@ -633,7 +629,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip->membase, handle_level_irq); if (! 
gc) { dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); - kfree(mvchip->chip.label); return -ENOMEM; } @@ -668,7 +663,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); kfree(gc); - kfree(mvchip->chip.label); return -ENODEV; } diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 01f7fe955590..76be7eed79de 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -32,7 +32,6 @@ #include <mach/hardware.h> #include <mach/map.h> -#include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <plat/cpu.h> @@ -446,7 +445,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = { }; #endif -#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5) +#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250) static struct samsung_gpio_cfg exynos_gpio_cfg = { .set_pull = exynos_gpio_setpull, .get_pull = exynos_gpio_getpull, @@ -2446,7 +2445,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = { }; #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250 static struct samsung_gpio_chip exynos5_gpios_1[] = { { .chip = { @@ -2614,7 +2613,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { }; #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250 static struct samsung_gpio_chip exynos5_gpios_2[] = { { .chip = { @@ -2675,7 +2674,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = { }; #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250 static struct samsung_gpio_chip exynos5_gpios_3[] = { { .chip = { @@ -2711,7 +2710,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = { }; #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250 static struct samsung_gpio_chip exynos5_gpios_4[] = { { .chip = { @@ -3010,7 +3009,7 @@ static __init int samsung_gpiolib_init(void) int i, nr_chips; int group = 0; -#ifdef CONFIG_PINCTRL_SAMSUNG +#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440) /* * This gpio driver includes support for device tree support and there * are platforms using it. 
In order to maintain compatibility with those @@ -3026,6 +3025,7 @@ static __init int samsung_gpiolib_init(void) static const struct of_device_id exynos_pinctrl_ids[] = { { .compatible = "samsung,pinctrl-exynos4210", }, { .compatible = "samsung,pinctrl-exynos4x12", }, + { .compatible = "samsung,pinctrl-exynos5440", }, }; for_each_matching_node(pctrl_np, exynos_pinctrl_ids) if (pctrl_np && of_device_is_available(pctrl_np)) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e6a11ca85eaf..7944d301518a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -641,6 +641,7 @@ static void i915_ring_error_state(struct seq_file *m, seq_printf(m, "%s command stream:\n", ring_str(ring)); seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); + seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); @@ -693,6 +694,8 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "EIR: 0x%08x\n", error->eir); seq_printf(m, "IER: 0x%08x\n", error->ier); seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); + seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); + seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); seq_printf(m, "CCID: 0x%08x\n", error->ccid); for (i = 0; i < dev_priv->num_fence_regs; i++) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ed3059575576..12ab3bdea54d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -188,10 +188,13 @@ struct drm_i915_error_state { u32 pgtbl_er; u32 ier; u32 ccid; + u32 derrmr; + u32 forcewake; bool waiting[I915_NUM_RINGS]; u32 pipestat[I915_MAX_PIPES]; u32 tail[I915_NUM_RINGS]; u32 head[I915_NUM_RINGS]; + u32 ctl[I915_NUM_RINGS]; u32 ipeir[I915_NUM_RINGS]; u32 ipehr[I915_NUM_RINGS]; u32 instdone[I915_NUM_RINGS]; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d6a994a07393..26d08bb58218 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, total = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; + u64 invalid_offset = (u64)-1; + int j; user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; @@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; } + /* As we do not update the known relocation offsets after + * relocating (due to the complexities in lock handling), + * we need to mark them as invalid now so that we force the + * relocation processing next time. Just in case the target + * object is evicted and then rebound into its old + * presumed_offset before the next execbuffer - if that + * happened we would make the mistake of assuming that the + * relocations were valid. 
+ */ + for (j = 0; j < exec[i].relocation_count; j++) { + if (copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + } + reloc_offset[i] = total; total += exec[i].relocation_count; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2220dec3e5d9..fe843389c7b4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1157,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev, error->acthd[ring->id] = intel_ring_get_active_head(ring); error->head[ring->id] = I915_READ_HEAD(ring); error->tail[ring->id] = I915_READ_TAIL(ring); + error->ctl[ring->id] = I915_READ_CTL(ring); error->cpu_ring_head[ring->id] = ring->head; error->cpu_ring_tail[ring->id] = ring->tail; @@ -1251,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev) else error->ier = I915_READ(IER); + if (INTEL_INFO(dev)->gen >= 6) + error->derrmr = I915_READ(DERRMR); + + if (IS_VALLEYVIEW(dev)) + error->forcewake = I915_READ(FORCEWAKE_VLV); + else if (INTEL_INFO(dev)->gen >= 7) + error->forcewake = I915_READ(FORCEWAKE_MT); + else if (INTEL_INFO(dev)->gen == 6) + error->forcewake = I915_READ(FORCEWAKE); + for_each_pipe(pipe) error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 186ee5c85b51..b401788e1791 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -512,6 +512,8 @@ #define GEN7_ERR_INT 0x44040 #define ERR_INT_MMIO_UNCLAIMED (1<<13) +#define DERRMR 0x44050 + /* GM45+ chicken bits -- debug workaround bits that may be required * for various sorts of correct behavior. The top 16 bits of each are * the enables for writing to the corresponding low bit. diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1b63d55318a0..fb3715b4b09d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect static void intel_dp_init_panel_power_sequencer(struct drm_device *dev, - struct intel_dp *intel_dp) + struct intel_dp *intel_dp, + struct edp_power_seq *out) { struct drm_i915_private *dev_priv = dev->dev_private; struct edp_power_seq cur, vbt, spec, final; @@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, intel_dp->panel_power_cycle_delay = get_delay(t11_t12); #undef get_delay + DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", + intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, + intel_dp->panel_power_cycle_delay); + + DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", + intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + + if (out) + *out = final; +} + +static void +intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, + struct intel_dp *intel_dp, + struct edp_power_seq *seq) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_on, pp_off, pp_div; + /* And finally store the new values in the power sequencer. 
*/ - pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | - (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); + pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | + (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); + pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | + (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); /* Compute the divisor for the pp clock, simply match the Bspec * formula. */ pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; - pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) + pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) << PANEL_POWER_CYCLE_DELAY_SHIFT); /* Haswell doesn't have any port selection bits for the panel @@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); I915_WRITE(PCH_PP_DIVISOR, pp_div); - - DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", - intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, - intel_dp->panel_power_cycle_delay); - - DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", - intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); - DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", I915_READ(PCH_PP_ON_DELAYS), I915_READ(PCH_PP_OFF_DELAYS), @@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *fixed_mode = NULL; + struct edp_power_seq power_seq = { 0 }; enum port port = intel_dig_port->port; const char *name = NULL; int type; @@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, } if (is_edp(intel_dp)) - intel_dp_init_panel_power_sequencer(dev, intel_dp); + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); intel_dp_i2c_init(intel_dp, intel_connector, name); @@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, return; } + /* We now know it's not a ghost, init power sequence regs. 
*/ + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, + &power_seq); + ironlake_edp_panel_vdd_on(intel_dp); edid = drm_get_edid(connector, &intel_dp->adapter); if (edid) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e83a11794172..3280cffe50f4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4250,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); } static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) @@ -4267,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), FORCEWAKE_ACK_TIMEOUT_MS)) @@ -4304,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ + /* something from same cacheline, but !FORCEWAKE */ + POSTING_READ(ECOBUS); gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); gen6_gt_check_fifodbg(dev_priv); } @@ -4351,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + /* something from same cacheline, but !FORCEWAKE_VLV */ + POSTING_READ(FORCEWAKE_ACK_VLV); } static void vlv_force_wake_get(struct drm_i915_private *dev_priv) @@ -4371,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) static void vlv_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* The below doubles as a POSTING_READ */ + /* something from same cacheline, but !FORCEWAKE_VLV */ + POSTING_READ(FORCEWAKE_ACK_VLV); gen6_gt_check_fifodbg(dev_priv); } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 061fa0a28900..4d0e60adbc6d 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2401,6 +2401,12 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) { struct evergreen_mc_save save; + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + reset_mask &= ~RADEON_RESET_DMA; + if (reset_mask == 0) return 0; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 896f1cbc58a5..59acabb45c9b 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1409,6 +1409,12 
@@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) { struct evergreen_mc_save save; + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + reset_mask &= ~RADEON_RESET_DMA; + if (reset_mask == 0) return 0; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 537e259b3837..3cb9d6089373 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1378,6 +1378,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) { struct rv515_mc_save save; + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + reset_mask &= ~RADEON_RESET_DMA; + if (reset_mask == 0) return 0; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 34e52304a525..a08f657329a0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -324,7 +324,6 @@ struct radeon_bo { struct list_head list; /* Protected by tbo.reserved */ u32 placements[3]; - u32 busy_placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; @@ -654,6 +653,8 @@ struct radeon_ring { u32 ptr_reg_mask; u32 nop; u32 idx; + u64 last_semaphore_signal_addr; + u64 last_semaphore_wait_addr; }; /* diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index dff6cf77f953..d9bf96ee299a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -69,9 +69,10 @@ * 2.26.0 - r600-eg: fix htile size computation * 2.27.0 - r600-SI: Add CS ioctl support for async DMA * 2.28.0 - r600-eg: Add MEM_WRITE packet support + * 2.29.0 - R500 FP16 color clear registers */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 28 +#define KMS_DRIVER_MINOR 29 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 883c95d8d90f..d3aface2d12d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->placement.fpfn = 0; rbo->placement.lpfn = 0; rbo->placement.placement = rbo->placements; + rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; @@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) if (!c) rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; rbo->placement.num_placement = c; - - c = 0; - rbo->placement.busy_placement = rbo->busy_placements; - if (rbo->rdev->flags & RADEON_IS_AGP) { - rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT; - } else { - rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; - } rbo->placement.num_busy_placement = c; } @@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head) { struct radeon_bo_list *lobj; struct radeon_bo *bo; + u32 domain; int r; r = ttm_eu_reserve_buffers(head); @@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head) list_for_each_entry(lobj, head, tv.head) { bo = lobj->bo; if (!bo->pin_count) { + domain = lobj->wdomain ? 
lobj->wdomain : lobj->rdomain; + + retry: + radeon_ttm_placement_from_domain(bo, domain); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (unlikely(r)) { + if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { + domain |= RADEON_GEM_DOMAIN_GTT; + goto retry; + } return r; } } diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 141f2b6a9cf2..2430d80b1871 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -784,6 +784,8 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) } seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); + seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); + seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr); seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); seq_printf(m, "%u dwords in ring\n", count); /* print 8 dw before current rptr as often it's the last executed diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 97f3ece81cd2..8dcc20f53d73 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, /* we assume caller has already allocated space on waiters ring */ radeon_semaphore_emit_wait(rdev, waiter, semaphore); + /* for debugging lockup only, used by sysfs debug files */ + rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; + rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; + return 0; } diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 911a8fbd32bb..78d5e99d759d 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -324,6 +324,8 @@ rv515 0x6d40 0x46AC US_OUT_FMT_2 0x46B0 US_OUT_FMT_3 0x46B4 US_W_FMT +0x46C0 RB3D_COLOR_CLEAR_VALUE_AR +0x46C4 RB3D_COLOR_CLEAR_VALUE_GB 0x4BC0 FG_FOG_BLEND 0x4BC4 FG_FOG_FACTOR 0x4BC8 FG_FOG_COLOR_R diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3240a3d64f30..ae8b48205a6c 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2215,6 +2215,12 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) { struct evergreen_mc_save save; + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + reset_mask &= ~RADEON_RESET_DMA; + if (reset_mask == 0) return 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 33d20be87db5..52b20b12c83a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, bo->mem = tmp_mem; bdev->driver->move_notify(bo, mem); bo->mem = *mem; + *mem = tmp_mem; } goto out_err; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d73d6e3e17b2..44420fca7dfa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, if (ttm->state == tt_unpopulated) { ret = ttm->bdev->driver->ttm_tt_populate(ttm); - if (ret) + if (ret) { + /* if we fail here don't nuke the mm node + * as the 
bo still owns it */ + old_copy.mm_node = NULL; goto out1; + } } add = 0; @@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, prot); } else ret = ttm_copy_io_page(new_iomap, old_iomap, page); - if (ret) + if (ret) { + /* failing here, means keep old copy as-is */ + old_copy.mm_node = NULL; goto out1; + } } mb(); out2: diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index f6c0011a0337..dd289fd179ca 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -403,7 +403,7 @@ struct dm_info_header { */ struct dm_info_msg { - struct dm_info_header header; + struct dm_header hdr; __u32 reserved; __u32 info_size; __u8 info[]; @@ -503,13 +503,17 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg) static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) { - switch (msg->header.type) { + struct dm_info_header *info_hdr; + + info_hdr = (struct dm_info_header *)msg->info; + + switch (info_hdr->type) { case INFO_TYPE_MAX_PAGE_CNT: pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); - pr_info("Data Size is %d\n", msg->header.data_size); + pr_info("Data Size is %d\n", info_hdr->data_size); break; default: - pr_info("Received Unknown type: %d\n", msg->header.type); + pr_info("Received Unknown type: %d\n", info_hdr->type); } } @@ -879,7 +883,7 @@ static int balloon_probe(struct hv_device *dev, balloon_onchannelcallback, dev); if (ret) - return ret; + goto probe_error0; dm_device.dev = dev; dm_device.state = DM_INITIALIZING; @@ -891,7 +895,7 @@ static int balloon_probe(struct hv_device *dev, kthread_run(dm_thread_func, &dm_device, "hv_balloon"); if (IS_ERR(dm_device.thread)) { ret = PTR_ERR(dm_device.thread); - goto probe_error0; + goto probe_error1; } hv_set_drvdata(dev, &dm_device); @@ -914,12 +918,12 @@ static int balloon_probe(struct hv_device *dev, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) - goto probe_error1; + goto probe_error2; t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); if (t == 0) { ret = -ETIMEDOUT; - goto probe_error1; + goto probe_error2; } /* @@ -928,7 +932,7 @@ static int balloon_probe(struct hv_device *dev, */ if (dm_device.state == DM_INIT_ERROR) { ret = -ETIMEDOUT; - goto probe_error1; + goto probe_error2; } /* * Now submit our capabilities to the host. 
@@ -961,12 +965,12 @@ static int balloon_probe(struct hv_device *dev, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) - goto probe_error1; + goto probe_error2; t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); if (t == 0) { ret = -ETIMEDOUT; - goto probe_error1; + goto probe_error2; } /* @@ -975,18 +979,20 @@ static int balloon_probe(struct hv_device *dev, */ if (dm_device.state == DM_INIT_ERROR) { ret = -ETIMEDOUT; - goto probe_error1; + goto probe_error2; } dm_device.state = DM_INITIALIZED; return 0; -probe_error1: +probe_error2: kthread_stop(dm_device.thread); -probe_error0: +probe_error1: vmbus_close(dev->channel); +probe_error0: + kfree(send_buffer); return ret; } @@ -999,6 +1005,7 @@ static int balloon_remove(struct hv_device *dev) vmbus_close(dev->channel); kthread_stop(dm->thread); + kfree(send_buffer); return 0; } diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 26ffd3e3fb74..2c113de94323 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -44,7 +44,6 @@ extern bool pciehp_poll_mode; extern int pciehp_poll_time; extern bool pciehp_debug; extern bool pciehp_force; -extern struct workqueue_struct *pciehp_wq; #define dbg(format, arg...) \ do { \ @@ -78,6 +77,7 @@ struct slot { struct hotplug_slot *hotplug_slot; struct delayed_work work; /* work for button event */ struct mutex lock; + struct workqueue_struct *wq; }; struct event_info { diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 916bf4f53aba..939bd1d4b5b1 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -42,7 +42,6 @@ bool pciehp_debug; bool pciehp_poll_mode; int pciehp_poll_time; bool pciehp_force; -struct workqueue_struct *pciehp_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" @@ -340,18 +339,13 @@ static int __init pcied_init(void) { int retval = 0; - pciehp_wq = alloc_workqueue("pciehp", 0, 0); - if (!pciehp_wq) - return -ENOMEM; - pciehp_firmware_init(); retval = pcie_port_service_register(&hpdriver_portdrv); dbg("pcie_port_service_register = %d\n", retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); - if (retval) { - destroy_workqueue(pciehp_wq); + if (retval) dbg("Failure to register service\n"); - } + return retval; } @@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void) { dbg("unload_pciehpd()\n"); pcie_port_service_unregister(&hpdriver_portdrv); - destroy_workqueue(pciehp_wq); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 27f44295a657..38f018679175 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) info->p_slot = p_slot; INIT_WORK(&info->work, interrupt_event_handler); - queue_work(pciehp_wq, &info->work); + queue_work(p_slot->wq, &info->work); return 0; } @@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) kfree(info); goto out; } - queue_work(pciehp_wq, &info->work); + queue_work(p_slot->wq, &info->work); out: mutex_unlock(&p_slot->lock); } @@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot) if (ATTN_LED(ctrl)) pciehp_set_attention_status(p_slot, 0); - queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); + queue_delayed_work(p_slot->wq, 
&p_slot->work, 5*HZ); break; case BLINKINGOFF_STATE: case BLINKINGON_STATE: @@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot) else p_slot->state = POWERON_STATE; - queue_work(pciehp_wq, &info->work); + queue_work(p_slot->wq, &info->work); } static void interrupt_event_handler(struct work_struct *work) diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 13b2eaf7ba43..5127f3f41821 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl) static int pcie_init_slot(struct controller *ctrl) { struct slot *slot; + char name[32]; slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) return -ENOMEM; + snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl)); + slot->wq = alloc_workqueue(name, 0, 0); + if (!slot->wq) + goto abort; + slot->ctrl = ctrl; mutex_init(&slot->lock); INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); ctrl->slot = slot; return 0; +abort: + kfree(slot); + return -ENOMEM; } static void pcie_cleanup_slot(struct controller *ctrl) { struct slot *slot = ctrl->slot; cancel_delayed_work(&slot->work); - flush_workqueue(pciehp_wq); + destroy_workqueue(slot->wq); kfree(slot); } diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index ca64932e658b..b849f995075a 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -46,8 +46,6 @@ extern bool shpchp_poll_mode; extern int shpchp_poll_time; extern bool shpchp_debug; -extern struct workqueue_struct *shpchp_wq; -extern struct workqueue_struct *shpchp_ordered_wq; #define dbg(format, arg...) \ do { \ @@ -91,6 +89,7 @@ struct slot { struct list_head slot_list; struct delayed_work work; /* work for button event */ struct mutex lock; + struct workqueue_struct *wq; u8 hp_slot; }; diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index b6de307248e4..3100c52c837c 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -39,8 +39,6 @@ bool shpchp_debug; bool shpchp_poll_mode; int shpchp_poll_time; -struct workqueue_struct *shpchp_wq; -struct workqueue_struct *shpchp_ordered_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" @@ -129,6 +127,14 @@ static int init_slots(struct controller *ctrl) slot->device = ctrl->slot_device_offset + i; slot->hpc_ops = ctrl->hpc_ops; slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); + + snprintf(name, sizeof(name), "shpchp-%d", slot->number); + slot->wq = alloc_workqueue(name, 0, 0); + if (!slot->wq) { + retval = -ENOMEM; + goto error_info; + } + mutex_init(&slot->lock); INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); @@ -148,7 +154,7 @@ static int init_slots(struct controller *ctrl) if (retval) { ctrl_err(ctrl, "pci_hp_register failed with error %d\n", retval); - goto error_info; + goto error_slotwq; } get_power_status(hotplug_slot, &info->power_status); @@ -160,6 +166,8 @@ static int init_slots(struct controller *ctrl) } return 0; +error_slotwq: + destroy_workqueue(slot->wq); error_info: kfree(info); error_hpslot: @@ -180,8 +188,7 @@ void cleanup_slots(struct controller *ctrl) slot = list_entry(tmp, struct slot, slot_list); list_del(&slot->slot_list); cancel_delayed_work(&slot->work); - flush_workqueue(shpchp_wq); - flush_workqueue(shpchp_ordered_wq); + destroy_workqueue(slot->wq); 
pci_hp_deregister(slot->hotplug_slot); } } @@ -364,25 +371,12 @@ static struct pci_driver shpc_driver = { static int __init shpcd_init(void) { - int retval = 0; - - shpchp_wq = alloc_ordered_workqueue("shpchp", 0); - if (!shpchp_wq) - return -ENOMEM; - - shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); - if (!shpchp_ordered_wq) { - destroy_workqueue(shpchp_wq); - return -ENOMEM; - } + int retval; retval = pci_register_driver(&shpc_driver); dbg("%s: pci_register_driver = %d\n", __func__, retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); - if (retval) { - destroy_workqueue(shpchp_ordered_wq); - destroy_workqueue(shpchp_wq); - } + return retval; } @@ -390,8 +384,6 @@ static void __exit shpcd_cleanup(void) { dbg("unload_shpchpd()\n"); pci_unregister_driver(&shpc_driver); - destroy_workqueue(shpchp_ordered_wq); - destroy_workqueue(shpchp_wq); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index f9b5a52e4115..58499277903a 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) info->p_slot = p_slot; INIT_WORK(&info->work, interrupt_event_handler); - queue_work(shpchp_wq, &info->work); + queue_work(p_slot->wq, &info->work); return 0; } @@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) kfree(info); goto out; } - queue_work(shpchp_ordered_wq, &info->work); + queue_work(p_slot->wq, &info->work); out: mutex_unlock(&p_slot->lock); } @@ -501,7 +501,7 @@ static void handle_button_press_event(struct slot *p_slot) p_slot->hpc_ops->green_led_blink(p_slot); p_slot->hpc_ops->set_attention_status(p_slot, 0); - queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); + queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); break; case BLINKINGOFF_STATE: case BLINKINGON_STATE: diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 6c8bc5809787..fde4a32a0295 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -82,4 +82,4 @@ endchoice config PCIE_PME def_bool y - depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI + depends on PCIEPORTBUS && PM_RUNTIME && ACPI diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 421bbc5fee32..564d97f94b6c 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -630,6 +630,7 @@ static void aer_recover_work_func(struct work_struct *work) continue; } do_recovery(pdev, entry.severity); + pci_dev_put(pdev); } } #endif diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index b52630b8eada..8474b6a4fc9b 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -771,6 +771,9 @@ void pcie_clear_aspm(struct pci_bus *bus) { struct pci_dev *child; + if (aspm_force) + return; + /* * Clear any ASPM setup that the firmware has carried out on this bus */ diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index fb31b457a56a..c5ceb9d90ea8 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -239,7 +239,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p) struct mxs_lradc *lradc = iio_priv(iio); const uint32_t chan_value = LRADC_CH_ACCUMULATE | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); - int i, j = 0; + unsigned int i, j = 0; for_each_set_bit(i, iio->active_scan_mask, iio->masklength) 
{ lradc->buffer[j] = readl(lradc->base + LRADC_CH(j)); diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c index 3525a68d6a75..41d7350d030f 100644 --- a/drivers/staging/iio/gyro/adis16080_core.c +++ b/drivers/staging/iio/gyro/adis16080_core.c @@ -69,7 +69,7 @@ static int adis16080_spi_read(struct iio_dev *indio_dev, ret = spi_read(st->us, st->buf, 2); if (ret == 0) - *val = ((st->buf[0] & 0xF) << 8) | st->buf[1]; + *val = sign_extend32(((st->buf[0] & 0xF) << 8) | st->buf[1], 11); mutex_unlock(&st->buf_lock); return ret; diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c index 131afd0c460c..9464f3874346 100644 --- a/drivers/staging/sb105x/sb_pci_mp.c +++ b/drivers/staging/sb105x/sb_pci_mp.c @@ -3054,7 +3054,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd) sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16); } break; -#ifdef CONFIG_PARPORT +#ifdef CONFIG_PARPORT_PC case PCI_DEVICE_ID_MP2S1P : sbdev->nr_ports = 2; diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h index 6b2ec390e775..806cbf72fb59 100644 --- a/drivers/staging/vt6656/bssdb.h +++ b/drivers/staging/vt6656/bssdb.h @@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject { } SRSNCapObject, *PSRSNCapObject; // BSS info(AP) -#pragma pack(1) typedef struct tagKnownBSS { // BSS info BOOL bActive; diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h index 5d8faf9f96ec..e0d2b07ba608 100644 --- a/drivers/staging/vt6656/int.h +++ b/drivers/staging/vt6656/int.h @@ -34,7 +34,6 @@ #include "device.h" /*--------------------- Export Definitions -------------------------*/ -#pragma pack(1) typedef struct tagSINTData { BYTE byTSR0; BYTE byPkt0; diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h index 22710cef751d..ae6e2d237b20 100644 --- a/drivers/staging/vt6656/iocmd.h +++ b/drivers/staging/vt6656/iocmd.h @@ -95,13 +95,12 @@ typedef enum tagWZONETYPE { // Ioctl interface structure // Command structure // -#pragma pack(1) typedef struct tagSCmdRequest { u8 name[16]; void *data; u16 wResult; u16 wCmdCode; -} SCmdRequest, *PSCmdRequest; +} __packed SCmdRequest, *PSCmdRequest; // // Scan @@ -111,7 +110,7 @@ typedef struct tagSCmdScan { u8 ssid[SSID_MAXLEN + 2]; -} SCmdScan, *PSCmdScan; +} __packed SCmdScan, *PSCmdScan; // // BSS Join @@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin { BOOL bPSEnable; BOOL bShareKeyAuth; -} SCmdBSSJoin, *PSCmdBSSJoin; +} __packed SCmdBSSJoin, *PSCmdBSSJoin; // // Zonetype Setting @@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet { BOOL bWrite; WZONETYPE ZoneType; -} SCmdZoneTypeSet, *PSCmdZoneTypeSet; +} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet; typedef struct tagSWPAResult { char ifname[100]; @@ -145,7 +144,7 @@ typedef struct tagSWPAResult { u8 key_mgmt; u8 eap_type; BOOL authenticated; -} SWPAResult, *PSWPAResult; +} __packed SWPAResult, *PSWPAResult; typedef struct tagSCmdStartAP { @@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP { BOOL bShareKeyAuth; u8 byBasicRate; -} SCmdStartAP, *PSCmdStartAP; +} __packed SCmdStartAP, *PSCmdStartAP; typedef struct tagSCmdSetWEP { @@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP { BOOL bWepKeyAvailable[WEP_NKEYS]; u32 auWepKeyLength[WEP_NKEYS]; -} SCmdSetWEP, *PSCmdSetWEP; +} __packed SCmdSetWEP, *PSCmdSetWEP; typedef struct tagSBSSIDItem { @@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem { BOOL bWEPOn; u32 uRSSI; -} SBSSIDItem; +} __packed SBSSIDItem; typedef struct tagSBSSIDList { u32 uItem; 
SBSSIDItem sBSSIDList[0]; -} SBSSIDList, *PSBSSIDList; +} __packed SBSSIDList, *PSBSSIDList; typedef struct tagSNodeItem { @@ -208,7 +207,7 @@ typedef struct tagSNodeItem { u32 uTxAttempts; u16 wFailureRatio; -} SNodeItem; +} __packed SNodeItem; typedef struct tagSNodeList { @@ -216,7 +215,7 @@ typedef struct tagSNodeList { u32 uItem; SNodeItem sNodeList[0]; -} SNodeList, *PSNodeList; +} __packed SNodeList, *PSNodeList; typedef struct tagSCmdLinkStatus { @@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus { u32 uChannel; u32 uLinkRate; -} SCmdLinkStatus, *PSCmdLinkStatus; +} __packed SCmdLinkStatus, *PSCmdLinkStatus; // // 802.11 counter @@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount { u32 ReceivedFragmentCount; u32 MulticastReceivedFrameCount; u32 FCSErrorCount; -} SDot11MIBCount, *PSDot11MIBCount; +} __packed SDot11MIBCount, *PSDot11MIBCount; @@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount { u32 ullTxBroadcastBytes[2]; u32 ullTxMulticastBytes[2]; u32 ullTxDirectedBytes[2]; -} SStatMIBCount, *PSStatMIBCount; +} __packed SStatMIBCount, *PSStatMIBCount; typedef struct tagSCmdValue { u32 dwValue; -} SCmdValue, *PSCmdValue; +} __packed SCmdValue, *PSCmdValue; // // hostapd & viawget ioctl related @@ -431,7 +430,7 @@ struct viawget_hostapd_param { u8 ssid[32]; } scan_req; } u; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h index 959c8868f6e2..2522ddec718d 100644 --- a/drivers/staging/vt6656/iowpa.h +++ b/drivers/staging/vt6656/iowpa.h @@ -67,12 +67,11 @@ enum { -#pragma pack(1) typedef struct viawget_wpa_header { u8 type; u16 req_ie_len; u16 resp_ie_len; -} viawget_wpa_header; +} __packed viawget_wpa_header; struct viawget_wpa_param { u32 cmd; @@ -113,9 +112,8 @@ struct viawget_wpa_param { u8 *buf; } scan_results; } u; -}; +} __packed; -#pragma pack(1) struct viawget_scan_result { u8 bssid[6]; u8 ssid[32]; @@ -130,7 +128,7 @@ struct viawget_scan_result { int noise; int level; int maxrate; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 4efa9bc0fcf0..89bfd858bb28 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp) /* SSID */ req->ssid.status = P80211ENUM_msgitem_status_data_ok; req->ssid.data.len = le16_to_cpu(item->ssid.len); - req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN); + req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN); memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len); /* supported rates */ diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index be6a373601b7..79ff3a5e925d 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -441,6 +441,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty, return pty_get_pktmode(tty, (int __user *)arg); case TIOCSIG: /* Send signal to other side of pty */ return pty_signal(tty, (int) arg); + case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */ + return -EINVAL; } return -ENOIOCTLCMD; } diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c index d085e3a8ec06..f9320437a649 100644 --- a/drivers/tty/serial/8250/8250.c +++ b/drivers/tty/serial/8250/8250.c @@ -300,6 +300,12 @@ static const struct serial8250_config uart_config[] = { UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00, 
.flags = UART_CAP_FIFO, }, + [PORT_BRCM_TRUMANAGE] = { + .name = "TruManage", + .fifo_size = 1, + .tx_loadsz = 1024, + .flags = UART_CAP_HFIFO, + }, [PORT_8250_CIR] = { .name = "CIR port" } @@ -1490,6 +1496,11 @@ void serial8250_tx_chars(struct uart_8250_port *up) port->icount.tx++; if (uart_circ_empty(xmit)) break; + if (up->capabilities & UART_CAP_HFIFO) { + if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) != + BOTH_EMPTY) + break; + } } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 3b4ea84898c2..12caa1292b75 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -40,6 +40,7 @@ struct serial8250_config { #define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */ #define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */ #define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */ +#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */ #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 1d0dba2d562d..096d2ef48b32 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p) } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { /* Clear the USR and write the LCR again. */ (void)p->serial_in(p, UART_USR); - p->serial_out(p, d->last_lcr, UART_LCR); + p->serial_out(p, UART_LCR, d->last_lcr); return 1; } diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 26b9dc012ed0..a27a98e1b066 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1085,6 +1085,18 @@ pci_omegapci_setup(struct serial_private *priv, return setup_port(priv, port, 2, idx * 8, 0); } +static int +pci_brcm_trumanage_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + int ret = pci_default_setup(priv, board, port, idx); + + port->port.type = PORT_BRCM_TRUMANAGE; + port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); + return ret; +} + static int skip_tx_en_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) @@ -1301,9 +1313,10 @@ pci_wch_ch353_setup(struct serial_private *priv, #define PCI_VENDOR_ID_AGESTAR 0x5372 #define PCI_DEVICE_ID_AGESTAR_9375 0x6872 #define PCI_VENDOR_ID_ASIX 0x9710 -#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019 #define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 +#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 +#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ @@ -1954,6 +1967,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .setup = pci_xr17v35x_setup, }, /* + * Broadcom TruManage (NetXtreme) + */ + { + .vendor = PCI_VENDOR_ID_BROADCOM, + .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_brcm_trumanage_setup, + }, + + /* * Default "match everything" terminator entry */ { @@ -2148,6 +2172,7 @@ enum pci_board_num_t { pbn_ce4100_1_115200, pbn_omegapci, pbn_NETMOS9900_2s_115200, + pbn_brcm_trumanage, }; /* @@ -2246,7 +2271,7 @@ static struct pciserial_board pci_boards[] = { 
[pbn_b0_8_1152000_200] = { .flags = FL_BASE0, - .num_ports = 2, + .num_ports = 8, .base_baud = 1152000, .uart_offset = 0x200, }, @@ -2892,6 +2917,12 @@ static struct pciserial_board pci_boards[] = { .num_ports = 2, .base_baud = 115200, }, + [pbn_brcm_trumanage] = { + .flags = FL_BASE0, + .num_ports = 1, + .reg_shift = 2, + .base_baud = 115200, + }, }; static const struct pci_device_id blacklist[] = { @@ -4471,6 +4502,13 @@ static struct pci_device_id serial_pci_tbl[] = { pbn_omegapci }, /* + * Broadcom TruManage + */ + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_brcm_trumanage }, + + /* * AgeStar as-prs2-009 */ { PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375, diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 675d94ab0aff..8cb6d8d66a13 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -637,6 +637,7 @@ static void ifx_port_shutdown(struct tty_port *port) clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags); mrdy_set_low(ifx_dev); + del_timer(&ifx_dev->spi_timer); clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); tasklet_kill(&ifx_dev->io_work_tasklet); } @@ -810,7 +811,8 @@ static void ifx_spi_io(unsigned long data) ifx_dev->spi_xfer.cs_change = 0; ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz; /* ifx_dev->spi_xfer.speed_hz = 390625; */ - ifx_dev->spi_xfer.bits_per_word = spi_bpw; + ifx_dev->spi_xfer.bits_per_word = + ifx_dev->spi_dev->bits_per_word; ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer; ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer; diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 6db23b035efe..e55615eb34ad 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -253,7 +253,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s) struct circ_buf *xmit = &s->port.state->xmit; if (auart_dma_enabled(s)) { - int i = 0; + u32 i = 0; int size; void *buffer = s->tx_dma_buf; @@ -412,10 +412,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) u32 ctrl = readl(u->membase + AUART_CTRL2); - ctrl &= ~AUART_CTRL2_RTSEN; + ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS); if (mctrl & TIOCM_RTS) { if (tty_port_cts_enabled(&u->state->port)) ctrl |= AUART_CTRL2_RTSEN; + else + ctrl |= AUART_CTRL2_RTS; } s->ctrl = mctrl; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 12e5249d053e..e514b3a4dc57 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1006,7 +1006,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port, ucon &= ucon_mask; wr_regl(port, S3C2410_UCON, ucon | cfg->ucon); - wr_regl(port, S3C2410_ULCON, cfg->ulcon); /* reset both fifos */ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c index 8fd181436a6b..d5ed9f613005 100644 --- a/drivers/tty/serial/vt8500_serial.c +++ b/drivers/tty/serial/vt8500_serial.c @@ -604,7 +604,7 @@ static int vt8500_serial_probe(struct platform_device *pdev) vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); - if (vt8500_port->clk) { + if (!IS_ERR(vt8500_port->clk)) { vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); } else { /* use the default of 24Mhz if not specified and warn */ diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 58184f3de686..82afc4d6a327 100644 --- 
a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout, wait_queue_t wait; unsigned long flags; + if (!tty) + return; + if (!timeout) timeout = (HZ * EDGE_CLOSING_WAIT)/100; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 478adcfcdf26..0d9dac9e7f93 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -449,6 +449,10 @@ static void option_instat_callback(struct urb *urb); #define PETATEL_VENDOR_ID 0x1ff4 #define PETATEL_PRODUCT_NP10T 0x600e +/* TP-LINK Incorporated products */ +#define TPLINK_VENDOR_ID 0x2357 +#define TPLINK_PRODUCT_MA180 0x0201 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -930,7 +934,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, @@ -1311,6 +1316,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, + { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 4362d9e7baa3..f72323ef618f 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -240,17 +240,17 @@ ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf, filled = 1; } else { /* Drop writes, fill reads with FF */ + filled = min((size_t)(x_end - pos), count); if (!iswrite) { char val = 0xFF; size_t i; - for (i = 0; i < x_end - pos; i++) { + for (i = 0; i < filled; i++) { if (put_user(val, buf + i)) goto out; } } - filled = x_end - pos; } count -= filled; diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index 4dcfced107f5..084041d42c9a 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c @@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu) static int vcpu_online(unsigned int cpu) { int err; - char dir[32], state[32]; + char dir[16], state[16]; sprintf(dir, "cpu/%u", cpu); - err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); + err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state); if (err != 1) { if (!xen_initial_domain()) printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2e22df2f7a3f..3c8803feba26 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by " static atomic_t pages_mapped = ATOMIC_INIT(0); 
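[Editor's illustration, not part of any patch above or below.] The drivers/xen/cpu_hotplug.c hunk a few lines up shrinks the on-stack buffers from 32 to 16 bytes and bounds the xenbus_scanf() conversion with "%15s", so the scanned string plus its NUL terminator can never overflow the 16-byte buffer. A minimal userspace sketch of the same bounded-width pattern, using plain sscanf() on a made-up input value:

	#include <stdio.h>

	int main(void)
	{
		char state[16];
		/* Hypothetical store value; with an unbounded "%s" anything
		 * longer than 15 bytes would silently overflow state[]. */
		const char *value = "online-with-a-long-trailing-tail";

		/* Width 15 == sizeof(state) - 1 leaves room for the NUL. */
		if (sscanf(value, "%15s", state) == 1)
			printf("state = \"%s\"\n", state);
		return 0;
	}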
static int use_ptemod; +#define populate_freeable_maps use_ptemod struct gntdev_priv { + /* maps with visible offsets in the file descriptor */ struct list_head maps; - /* lock protects maps from concurrent changes */ + /* maps that are not visible; will be freed on munmap. + * Only populated if populate_freeable_maps == 1 */ + struct list_head freeable_maps; + /* lock protects maps and freeable_maps */ spinlock_t lock; struct mm_struct *mm; struct mmu_notifier mn; @@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, return NULL; } -static void gntdev_put_map(struct grant_map *map) +static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) { if (!map) return; @@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map) evtchn_put(map->notify.event); } + if (populate_freeable_maps && priv) { + spin_lock(&priv->lock); + list_del(&map->next); + spin_unlock(&priv->lock); + } + if (map->pages && !use_ptemod) unmap_grant_pages(map, 0, map->count); gntdev_free_map(map); @@ -301,17 +312,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { int pgno = (map->notify.addr >> PAGE_SHIFT); - if (pgno >= offset && pgno < offset + pages && use_ptemod) { - void __user *tmp = (void __user *) - map->vma->vm_start + map->notify.addr; - err = copy_to_user(tmp, &err, 1); - if (err) - return -EFAULT; - map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; - } else if (pgno >= offset && pgno < offset + pages) { - uint8_t *tmp = kmap(map->pages[pgno]); + if (pgno >= offset && pgno < offset + pages) { + /* No need for kmap, pages are in lowmem */ + uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; - kunmap(map->pages[pgno]); map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; } } @@ -376,11 +380,24 @@ static void gntdev_vma_open(struct vm_area_struct *vma) static void gntdev_vma_close(struct vm_area_struct *vma) { struct grant_map *map = vma->vm_private_data; + struct file *file = vma->vm_file; + struct gntdev_priv *priv = file->private_data; pr_debug("gntdev_vma_close %p\n", vma); - map->vma = NULL; + if (use_ptemod) { + /* It is possible that an mmu notifier could be running + * concurrently, so take priv->lock to ensure that the vma won't + * vanishing during the unmap_grant_pages call, since we will + * spin here until that completes. Such a concurrent call will + * not do any unmapping, since that has been done prior to + * closing the vma, but it may still iterate the unmap_ops list. 
+ */ + spin_lock(&priv->lock); + map->vma = NULL; + spin_unlock(&priv->lock); + } vma->vm_private_data = NULL; - gntdev_put_map(map); + gntdev_put_map(priv, map); } static struct vm_operations_struct gntdev_vmops = { @@ -390,33 +407,43 @@ static struct vm_operations_struct gntdev_vmops = { /* ------------------------------------------------------------------ */ +static void unmap_if_in_range(struct grant_map *map, + unsigned long start, unsigned long end) +{ + unsigned long mstart, mend; + int err; + + if (!map->vma) + return; + if (map->vma->vm_start >= end) + return; + if (map->vma->vm_end <= start) + return; + mstart = max(start, map->vma->vm_start); + mend = min(end, map->vma->vm_end); + pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", + map->index, map->count, + map->vma->vm_start, map->vma->vm_end, + start, end, mstart, mend); + err = unmap_grant_pages(map, + (mstart - map->vma->vm_start) >> PAGE_SHIFT, + (mend - mstart) >> PAGE_SHIFT); + WARN_ON(err); +} + static void mn_invl_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); struct grant_map *map; - unsigned long mstart, mend; - int err; spin_lock(&priv->lock); list_for_each_entry(map, &priv->maps, next) { - if (!map->vma) - continue; - if (map->vma->vm_start >= end) - continue; - if (map->vma->vm_end <= start) - continue; - mstart = max(start, map->vma->vm_start); - mend = min(end, map->vma->vm_end); - pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", - map->index, map->count, - map->vma->vm_start, map->vma->vm_end, - start, end, mstart, mend); - err = unmap_grant_pages(map, - (mstart - map->vma->vm_start) >> PAGE_SHIFT, - (mend - mstart) >> PAGE_SHIFT); - WARN_ON(err); + unmap_if_in_range(map, start, end); + } + list_for_each_entry(map, &priv->freeable_maps, next) { + unmap_if_in_range(map, start, end); } spin_unlock(&priv->lock); } @@ -445,6 +472,15 @@ static void mn_release(struct mmu_notifier *mn, err = unmap_grant_pages(map, /* offset */ 0, map->count); WARN_ON(err); } + list_for_each_entry(map, &priv->freeable_maps, next) { + if (!map->vma) + continue; + pr_debug("map %d+%d (%lx %lx)\n", + map->index, map->count, + map->vma->vm_start, map->vma->vm_end); + err = unmap_grant_pages(map, /* offset */ 0, map->count); + WARN_ON(err); + } spin_unlock(&priv->lock); } @@ -466,6 +502,7 @@ static int gntdev_open(struct inode *inode, struct file *flip) return -ENOMEM; INIT_LIST_HEAD(&priv->maps); + INIT_LIST_HEAD(&priv->freeable_maps); spin_lock_init(&priv->lock); if (use_ptemod) { @@ -500,8 +537,9 @@ static int gntdev_release(struct inode *inode, struct file *flip) while (!list_empty(&priv->maps)) { map = list_entry(priv->maps.next, struct grant_map, next); list_del(&map->next); - gntdev_put_map(map); + gntdev_put_map(NULL /* already removed */, map); } + WARN_ON(!list_empty(&priv->freeable_maps)); if (use_ptemod) mmu_notifier_unregister(&priv->mn, priv->mm); @@ -529,14 +567,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { pr_debug("can't map: over limit\n"); - gntdev_put_map(map); + gntdev_put_map(NULL, map); return err; } if (copy_from_user(map->grants, &u->refs, sizeof(map->grants[0]) * op.count) != 0) { - gntdev_put_map(map); - return err; + gntdev_put_map(NULL, map); + return -EFAULT; } spin_lock(&priv->lock); @@ -565,11 +603,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv 
*priv, map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); if (map) { list_del(&map->next); + if (populate_freeable_maps) + list_add_tail(&map->next, &priv->freeable_maps); err = 0; } spin_unlock(&priv->lock); if (map) - gntdev_put_map(map); + gntdev_put_map(priv, map); return err; } @@ -579,25 +619,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv, struct ioctl_gntdev_get_offset_for_vaddr op; struct vm_area_struct *vma; struct grant_map *map; + int rv = -EINVAL; if (copy_from_user(&op, u, sizeof(op)) != 0) return -EFAULT; pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); + down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm, op.vaddr); if (!vma || vma->vm_ops != &gntdev_vmops) - return -EINVAL; + goto out_unlock; map = vma->vm_private_data; if (!map) - return -EINVAL; + goto out_unlock; op.offset = map->index << PAGE_SHIFT; op.count = map->count; + rv = 0; - if (copy_to_user(u, &op, sizeof(op)) != 0) + out_unlock: + up_read(¤t->mm->mmap_sem); + + if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0) return -EFAULT; - return 0; + return rv; } static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) @@ -778,7 +824,7 @@ out_unlock_put: out_put_map: if (use_ptemod) map->vma = NULL; - gntdev_put_map(map); + gntdev_put_map(priv, map); return err; } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7038de53652b..157c0ccda3ef 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -56,10 +56,6 @@ /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff -#define GREFS_PER_GRANT_FRAME \ -(grant_table_version == 1 ? \ -(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \ -(PAGE_SIZE / sizeof(union grant_entry_v2))) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; @@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface; static grant_status_t *grstatus; static int grant_table_version; +static int grefs_per_grant_frame; static struct gnttab_free_callback *gnttab_free_callback_list; @@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames) unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; + BUG_ON(grefs_per_grant_frame == 0); + new_nr_grant_frames = nr_grant_frames + more_frames; - extra_entries = more_frames * GREFS_PER_GRANT_FRAME; + extra_entries = more_frames * grefs_per_grant_frame; - nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; + nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; new_nr_glist_frames = - (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; + (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) @@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames) } - for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; - i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) + for (i = grefs_per_grant_frame * nr_grant_frames; + i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; - gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; + gnttab_free_head = grefs_per_grant_frame * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; @@ -957,7 +956,8 @@ 
EXPORT_SYMBOL_GPL(gnttab_unmap_refs); static unsigned nr_status_frames(unsigned nr_grant_frames) { - return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; + BUG_ON(grefs_per_grant_frame == 0); + return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP; } static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) @@ -1115,6 +1115,7 @@ static void gnttab_request_version(void) rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); if (rc == 0 && gsv.version == 2) { grant_table_version = 2; + grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2); gnttab_interface = &gnttab_v2_ops; } else if (grant_table_version == 2) { /* @@ -1127,17 +1128,17 @@ static void gnttab_request_version(void) panic("we need grant tables version 2, but only version 1 is available"); } else { grant_table_version = 1; + grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1); gnttab_interface = &gnttab_v1_ops; } printk(KERN_INFO "Grant tables using version %d layout.\n", grant_table_version); } -int gnttab_resume(void) +static int gnttab_setup(void) { unsigned int max_nr_gframes; - gnttab_request_version(); max_nr_gframes = gnttab_max_grant_frames(); if (max_nr_gframes < nr_grant_frames) return -ENOSYS; @@ -1160,6 +1161,12 @@ int gnttab_resume(void) return 0; } +int gnttab_resume(void) +{ + gnttab_request_version(); + return gnttab_setup(); +} + int gnttab_suspend(void) { gnttab_interface->unmap_frames(); @@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries) int rc; unsigned int cur, extra; + BUG_ON(grefs_per_grant_frame == 0); cur = nr_grant_frames; - extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / - GREFS_PER_GRANT_FRAME); + extra = ((req_entries + (grefs_per_grant_frame-1)) / + grefs_per_grant_frame); if (cur + extra > gnttab_max_grant_frames()) return -ENOSPC; @@ -1191,21 +1199,23 @@ int gnttab_init(void) unsigned int nr_init_grefs; int ret; + gnttab_request_version(); nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ + BUG_ON(grefs_per_grant_frame == 0); max_nr_glist_frames = (boot_max_nr_grant_frames * - GREFS_PER_GRANT_FRAME / RPP); + grefs_per_grant_frame / RPP); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; - nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; + nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) { @@ -1214,12 +1224,12 @@ int gnttab_init(void) } } - if (gnttab_resume() < 0) { + if (gnttab_setup() < 0) { ret = -ENODEV; goto ini_nomem; } - nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; + nr_init_grefs = nr_grant_frames * grefs_per_grant_frame; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 0bbbccbb1f12..ca2b00e9d558 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -199,9 +199,6 @@ static long privcmd_ioctl_mmap(void __user *udata) LIST_HEAD(pagelist); struct mmap_mfn_state state; - if (!xen_initial_domain()) - return -EPERM; - /* We only support privcmd_ioctl_mmap_batch for auto translated. 
*/ if (xen_feature(XENFEAT_auto_translated_physmap)) return -ENOSYS; @@ -261,11 +258,12 @@ struct mmap_batch_state { * -ENOENT if at least 1 -ENOENT has happened. */ int global_error; - /* An array for individual errors */ - int *err; + int version; /* User-space mfn array to store errors in the second pass for V1. */ xen_pfn_t __user *user_mfn; + /* User-space int array to store errors in the second pass for V2. */ + int __user *user_err; }; /* auto translated dom0 note: if domU being created is PV, then mfn is @@ -288,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state) &cur_page); /* Store error code for second pass. */ - *(st->err++) = ret; + if (st->version == 1) { + if (ret < 0) { + /* + * V1 encodes the error codes in the 32bit top nibble of the + * mfn (with its known limitations vis-a-vis 64 bit callers). + */ + *mfnp |= (ret == -ENOENT) ? + PRIVCMD_MMAPBATCH_PAGED_ERROR : + PRIVCMD_MMAPBATCH_MFN_ERROR; + } + } else { /* st->version == 2 */ + *((int *) mfnp) = ret; + } /* And see if it affects the global_error. */ if (ret < 0) { @@ -305,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state) return 0; } -static int mmap_return_errors_v1(void *data, void *state) +static int mmap_return_errors(void *data, void *state) { - xen_pfn_t *mfnp = data; struct mmap_batch_state *st = state; - int err = *(st->err++); - /* - * V1 encodes the error codes in the 32bit top nibble of the - * mfn (with its known limitations vis-a-vis 64 bit callers). - */ - *mfnp |= (err == -ENOENT) ? - PRIVCMD_MMAPBATCH_PAGED_ERROR : - PRIVCMD_MMAPBATCH_MFN_ERROR; - return __put_user(*mfnp, st->user_mfn++); + if (st->version == 1) { + xen_pfn_t mfnp = *((xen_pfn_t *) data); + if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR) + return __put_user(mfnp, st->user_mfn++); + else + st->user_mfn++; + } else { /* st->version == 2 */ + int err = *((int *) data); + if (err) + return __put_user(err, st->user_err++); + else + st->user_err++; + } + + return 0; } /* Allocate pfns that are then mapped with gmfns from foreign domid. Update @@ -357,12 +372,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) struct vm_area_struct *vma; unsigned long nr_pages; LIST_HEAD(pagelist); - int *err_array = NULL; struct mmap_batch_state state; - if (!xen_initial_domain()) - return -EPERM; - switch (version) { case 1: if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) @@ -396,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) goto out; } - err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL); - if (err_array == NULL) { - ret = -ENOMEM; - goto out; + if (version == 2) { + /* Zero error array now to only copy back actual errors. */ + if (clear_user(m.err, sizeof(int) * m.num)) { + ret = -EFAULT; + goto out; + } } down_write(&mm->mmap_sem); @@ -427,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) state.va = m.addr; state.index = 0; state.global_error = 0; - state.err = err_array; + state.version = version; /* mmap_batch_fn guarantees ret == 0 */ BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), @@ -435,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) up_write(&mm->mmap_sem); - if (version == 1) { - if (state.global_error) { - /* Write back errors in second pass. 
*/ - state.user_mfn = (xen_pfn_t *)m.arr; - state.err = err_array; - ret = traverse_pages(m.num, sizeof(xen_pfn_t), - &pagelist, mmap_return_errors_v1, &state); - } else - ret = 0; - - } else if (version == 2) { - ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); - if (ret) - ret = -EFAULT; - } + if (state.global_error) { + /* Write back errors in second pass. */ + state.user_mfn = (xen_pfn_t *)m.arr; + state.user_err = m.err; + ret = traverse_pages(m.num, sizeof(xen_pfn_t), + &pagelist, mmap_return_errors, &state); + } else + ret = 0; /* If we have not had any EFAULT-like global errors then set the global * error to -ENOENT if necessary. */ @@ -457,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) ret = -ENOENT; out: - kfree(err_array); free_page_list(&pagelist); return ret; diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index a7def010eba3..f72af87640e0 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev) { - if (xen_pcibk_backend && xen_pcibk_backend->free) + if (xen_pcibk_backend && xen_pcibk_backend->release) return xen_pcibk_backend->release(pdev, dev); } diff --git a/fs/Kconfig b/fs/Kconfig index cfe512fd1caf..780725a463b1 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -68,16 +68,6 @@ source "fs/quota/Kconfig" source "fs/autofs4/Kconfig" source "fs/fuse/Kconfig" -config CUSE - tristate "Character device in Userspace support" - depends on FUSE_FS - help - This FUSE extension allows character devices to be - implemented in userspace. - - If you want to develop or use userspace character device - based on CUSE, answer Y or M. 
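[Editor's illustration, not part of any patch above or below.] The xen-pciback pciback.h hunk above makes the NULL check match the callback that is actually invoked: the helper used to test xen_pcibk_backend->free but then call ->release, so a backend providing only free() would oops. A stand-alone sketch of the corrected pattern, using a hypothetical ops structure rather than the driver's real types:

	#include <stdio.h>

	struct backend_ops {
		void (*add)(int dev);
		void (*release)(int dev);	/* may legitimately be NULL */
	};

	static void release_dev(const struct backend_ops *ops, int dev)
	{
		/* Guard on the same member that is called below. */
		if (ops && ops->release)
			ops->release(dev);
	}

	static void dummy_release(int dev)
	{
		printf("released device %d\n", dev);
	}

	int main(void)
	{
		struct backend_ops ops = { .release = dummy_release };

		release_dev(&ops, 7);
		release_dev(NULL, 7);	/* safely ignored */
		return 0;
	}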
- config GENERIC_ACL bool select FS_POSIX_ACL diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index e95b94945d5f..137af4255da6 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -191,15 +191,14 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type) retval = f2fs_getxattr(inode, name_index, "", value, retval); } - if (retval < 0) { - if (retval == -ENODATA) - acl = NULL; - else - acl = ERR_PTR(retval); - } else { + if (retval > 0) acl = f2fs_acl_from_disk(value, retval); - } + else if (retval == -ENODATA) + acl = NULL; + else + acl = ERR_PTR(retval); kfree(value); + if (!IS_ERR(acl)) set_cached_acl(inode, type, acl); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 6ef36c37e2be..ff3c8439af87 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -214,7 +214,6 @@ retry: goto retry; } new->ino = ino; - INIT_LIST_HEAD(&new->list); /* add new_oentry into list which is sorted by inode number */ if (orphan) { @@ -772,7 +771,7 @@ void init_orphan_info(struct f2fs_sb_info *sbi) sbi->n_orphans = 0; } -int create_checkpoint_caches(void) +int __init create_checkpoint_caches(void) { orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry", sizeof(struct orphan_inode_entry), NULL); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 3aa5ce7cab83..7bd22a201125 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -547,6 +547,15 @@ redirty_out: #define MAX_DESIRED_PAGES_WP 4096 +static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, + void *data) +{ + struct address_space *mapping = data; + int ret = mapping->a_ops->writepage(page, wbc); + mapping_set_error(mapping, ret); + return ret; +} + static int f2fs_write_data_pages(struct address_space *mapping, struct writeback_control *wbc) { @@ -563,7 +572,7 @@ static int f2fs_write_data_pages(struct address_space *mapping, if (!S_ISDIR(inode->i_mode)) mutex_lock(&sbi->writepages); - ret = generic_writepages(mapping, wbc); + ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); if (!S_ISDIR(inode->i_mode)) mutex_unlock(&sbi->writepages); f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL)); @@ -689,6 +698,11 @@ static int f2fs_set_data_page_dirty(struct page *page) return 0; } +static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) +{ + return generic_block_bmap(mapping, block, get_data_block_ro); +} + const struct address_space_operations f2fs_dblock_aops = { .readpage = f2fs_read_data_page, .readpages = f2fs_read_data_pages, @@ -700,4 +714,5 @@ const struct address_space_operations f2fs_dblock_aops = { .invalidatepage = f2fs_invalidate_data_page, .releasepage = f2fs_release_data_page, .direct_IO = f2fs_direct_IO, + .bmap = f2fs_bmap, }; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 0e0380a588ad..c8c37307b326 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -26,6 +26,7 @@ static LIST_HEAD(f2fs_stat_list); static struct dentry *debugfs_root; +static DEFINE_MUTEX(f2fs_stat_mutex); static void update_general_status(struct f2fs_sb_info *sbi) { @@ -180,18 +181,14 @@ static int stat_show(struct seq_file *s, void *v) int i = 0; int j; + mutex_lock(&f2fs_stat_mutex); list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { - mutex_lock(&si->stat_lock); - if (!si->sbi) { - mutex_unlock(&si->stat_lock); - continue; - } update_general_status(si->sbi); seq_printf(s, "\n=====[ partition info. 
#%d ]=====\n", i++); - seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ", - si->nat_area_segs, si->sit_area_segs); + seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", + si->sit_area_segs, si->nat_area_segs); seq_printf(s, "[SSA: %d] [MAIN: %d", si->ssa_area_segs, si->main_area_segs); seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", @@ -286,8 +283,8 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n", (si->base_mem + si->cache_mem) >> 10, si->base_mem >> 10, si->cache_mem >> 10); - mutex_unlock(&si->stat_lock); } + mutex_unlock(&f2fs_stat_mutex); return 0; } @@ -303,7 +300,7 @@ static const struct file_operations stat_fops = { .release = single_release, }; -static int init_stats(struct f2fs_sb_info *sbi) +int f2fs_build_stats(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_stat_info *si; @@ -313,9 +310,6 @@ static int init_stats(struct f2fs_sb_info *sbi) return -ENOMEM; si = sbi->stat_info; - mutex_init(&si->stat_lock); - list_add_tail(&si->stat_list, &f2fs_stat_list); - si->all_area_segs = le32_to_cpu(raw_super->segment_count); si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit); si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat); @@ -325,21 +319,11 @@ static int init_stats(struct f2fs_sb_info *sbi) si->main_area_zones = si->main_area_sections / le32_to_cpu(raw_super->secs_per_zone); si->sbi = sbi; - return 0; -} -int f2fs_build_stats(struct f2fs_sb_info *sbi) -{ - int retval; - - retval = init_stats(sbi); - if (retval) - return retval; - - if (!debugfs_root) - debugfs_root = debugfs_create_dir("f2fs", NULL); + mutex_lock(&f2fs_stat_mutex); + list_add_tail(&si->stat_list, &f2fs_stat_list); + mutex_unlock(&f2fs_stat_mutex); - debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops); return 0; } @@ -347,14 +331,22 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { struct f2fs_stat_info *si = sbi->stat_info; + mutex_lock(&f2fs_stat_mutex); list_del(&si->stat_list); - mutex_lock(&si->stat_lock); - si->sbi = NULL; - mutex_unlock(&si->stat_lock); + mutex_unlock(&f2fs_stat_mutex); + kfree(sbi->stat_info); } -void destroy_root_stats(void) +void __init f2fs_create_root_stats(void) +{ + debugfs_root = debugfs_create_dir("f2fs", NULL); + if (debugfs_root) + debugfs_create_file("status", S_IRUGO, debugfs_root, + NULL, &stat_fops); +} + +void f2fs_destroy_root_stats(void) { debugfs_remove_recursive(debugfs_root); debugfs_root = NULL; diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 951ed52748f6..989980e16d0b 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -503,7 +503,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, } if (inode) { - inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; + inode->i_ctime = CURRENT_TIME; drop_nlink(inode); if (S_ISDIR(inode->i_mode)) { drop_nlink(inode); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 13c6dfbb7183..c8e2d751ef9c 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -211,11 +211,11 @@ struct dnode_of_data { static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, struct page *ipage, struct page *npage, nid_t nid) { + memset(dn, 0, sizeof(*dn)); dn->inode = inode; dn->inode_page = ipage; dn->node_page = npage; dn->nid = nid; - dn->inode_page_locked = 0; } /* @@ -877,6 +877,8 @@ bool f2fs_empty_dir(struct inode *); * super.c */ int f2fs_sync_fs(struct super_block *, int); +extern __printf(3, 4) +void f2fs_msg(struct super_block *, const char *, const 
char *, ...); /* * hash.c @@ -912,7 +914,7 @@ int restore_node_summary(struct f2fs_sb_info *, unsigned int, void flush_nat_entries(struct f2fs_sb_info *); int build_node_manager(struct f2fs_sb_info *); void destroy_node_manager(struct f2fs_sb_info *); -int create_node_manager_caches(void); +int __init create_node_manager_caches(void); void destroy_node_manager_caches(void); /* @@ -964,7 +966,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *); void block_operations(struct f2fs_sb_info *); void write_checkpoint(struct f2fs_sb_info *, bool, bool); void init_orphan_info(struct f2fs_sb_info *); -int create_checkpoint_caches(void); +int __init create_checkpoint_caches(void); void destroy_checkpoint_caches(void); /* @@ -984,9 +986,9 @@ int do_write_data_page(struct page *); int start_gc_thread(struct f2fs_sb_info *); void stop_gc_thread(struct f2fs_sb_info *); block_t start_bidx_of_node(unsigned int); -int f2fs_gc(struct f2fs_sb_info *, int); +int f2fs_gc(struct f2fs_sb_info *); void build_gc_manager(struct f2fs_sb_info *); -int create_gc_caches(void); +int __init create_gc_caches(void); void destroy_gc_caches(void); /* @@ -1058,7 +1060,8 @@ struct f2fs_stat_info { int f2fs_build_stats(struct f2fs_sb_info *); void f2fs_destroy_stats(struct f2fs_sb_info *); -void destroy_root_stats(void); +void __init f2fs_create_root_stats(void); +void f2fs_destroy_root_stats(void); #else #define stat_inc_call_count(si) #define stat_inc_seg_count(si, type) @@ -1068,7 +1071,8 @@ void destroy_root_stats(void); static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } -static inline void destroy_root_stats(void) { } +static inline void __init f2fs_create_root_stats(void) { } +static inline void f2fs_destroy_root_stats(void) { } #endif extern const struct file_operations f2fs_dir_operations; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 7f9ea9271ebe..3191b52aafb0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -96,8 +96,9 @@ out: } static const struct vm_operations_struct f2fs_file_vm_ops = { - .fault = filemap_fault, - .page_mkwrite = f2fs_vm_page_mkwrite, + .fault = filemap_fault, + .page_mkwrite = f2fs_vm_page_mkwrite, + .remap_pages = generic_file_remap_pages, }; static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode) @@ -137,6 +138,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if (ret) return ret; + /* guarantee free sections for fsync */ + f2fs_balance_fs(sbi); + mutex_lock(&inode->i_mutex); if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) @@ -407,6 +411,8 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) struct dnode_of_data dn; struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + f2fs_balance_fs(sbi); + mutex_lock_op(sbi, DATA_TRUNC); set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, index, RDONLY_NODE); @@ -534,7 +540,6 @@ static long f2fs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file->f_path.dentry->d_inode; - struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); long ret; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) @@ -545,7 +550,10 @@ static long f2fs_fallocate(struct file *file, int mode, else ret = expand_inode_data(inode, offset, len, mode); - f2fs_balance_fs(sbi); + if (!ret) { + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + } return ret; } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index b0ec721e984a..c386910dacc5 
100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -78,7 +78,7 @@ static int gc_thread_func(void *data) sbi->bg_gc++; - if (f2fs_gc(sbi, 1) == GC_NONE) + if (f2fs_gc(sbi) == GC_NONE) wait_ms = GC_THREAD_NOGC_SLEEP_TIME; else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME) wait_ms = GC_THREAD_MAX_SLEEP_TIME; @@ -424,7 +424,11 @@ next_step: } /* - * Calculate start block index that this node page contains + * Calculate start block index indicating the given node offset. + * Be careful, caller should give this node offset only indicating direct node + * blocks. If any node offsets, which point the other types of node blocks such + * as indirect or double indirect node blocks, are given, it must be a caller's + * bug. */ block_t start_bidx_of_node(unsigned int node_ofs) { @@ -651,62 +655,44 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, return ret; } -int f2fs_gc(struct f2fs_sb_info *sbi, int nGC) +int f2fs_gc(struct f2fs_sb_info *sbi) { - unsigned int segno; - int old_free_secs, cur_free_secs; - int gc_status, nfree; struct list_head ilist; + unsigned int segno, i; int gc_type = BG_GC; + int gc_status = GC_NONE; INIT_LIST_HEAD(&ilist); gc_more: - nfree = 0; - gc_status = GC_NONE; + if (!(sbi->sb->s_flags & MS_ACTIVE)) + goto stop; if (has_not_enough_free_secs(sbi)) - old_free_secs = reserved_sections(sbi); - else - old_free_secs = free_sections(sbi); - - while (sbi->sb->s_flags & MS_ACTIVE) { - int i; - if (has_not_enough_free_secs(sbi)) - gc_type = FG_GC; + gc_type = FG_GC; - cur_free_secs = free_sections(sbi) + nfree; + if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) + goto stop; - /* We got free space successfully. */ - if (nGC < cur_free_secs - old_free_secs) - break; - - if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) + for (i = 0; i < sbi->segs_per_sec; i++) { + /* + * do_garbage_collect will give us three gc_status: + * GC_ERROR, GC_DONE, and GC_BLOCKED. + * If GC is finished uncleanly, we have to return + * the victim to dirty segment list. + */ + gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type); + if (gc_status != GC_DONE) break; - - for (i = 0; i < sbi->segs_per_sec; i++) { - /* - * do_garbage_collect will give us three gc_status: - * GC_ERROR, GC_DONE, and GC_BLOCKED. - * If GC is finished uncleanly, we have to return - * the victim to dirty segment list. 
- */ - gc_status = do_garbage_collect(sbi, segno + i, - &ilist, gc_type); - if (gc_status != GC_DONE) - goto stop; - nfree++; - } } -stop: - if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) { + if (has_not_enough_free_secs(sbi)) { write_checkpoint(sbi, (gc_status == GC_BLOCKED), false); - if (nfree) + if (has_not_enough_free_secs(sbi)) goto gc_more; } +stop: mutex_unlock(&sbi->gc_mutex); put_gc_inode(&ilist); - BUG_ON(!list_empty(&ilist)); return gc_status; } @@ -715,7 +701,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi) DIRTY_I(sbi)->v_ops = &default_v_ops; } -int create_gc_caches(void) +int __init create_gc_caches(void) { winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", sizeof(struct inode_entry), NULL); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index bf20b4d03214..794241777322 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -217,6 +217,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) inode->i_ino == F2FS_META_INO(sbi)) return 0; + if (wbc) + f2fs_balance_fs(sbi); + node_page = get_node_page(sbi, inode->i_ino); if (IS_ERR(node_page)) return PTR_ERR(node_page); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 5066bfd256c9..9bda63c9c166 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page, return 0; } +/* + * It is very important to gather dirty pages and write at once, so that we can + * submit a big bio without interfering other data writes. + * Be default, 512 pages (2MB), a segment size, is quite reasonable. + */ +#define COLLECT_DIRTY_NODES 512 static int f2fs_write_node_pages(struct address_space *mapping, struct writeback_control *wbc) { @@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping, struct block_device *bdev = sbi->sb->s_bdev; long nr_to_write = wbc->nr_to_write; - if (wbc->for_kupdate) - return 0; - - if (get_pages(sbi, F2FS_DIRTY_NODES) == 0) - return 0; - + /* First check balancing cached NAT entries */ if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) { write_checkpoint(sbi, false, false); return 0; } + /* collect a number of dirty node pages and write together */ + if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES) + return 0; + /* if mounting is failed, skip writing node pages */ wbc->nr_to_write = bio_get_nr_vecs(bdev); sync_node_pages(sbi, 0, wbc); @@ -1732,7 +1737,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) kfree(nm_i); } -int create_node_manager_caches(void) +int __init create_node_manager_caches(void) { nat_entry_slab = f2fs_kmem_cache_create("nat_entry", sizeof(struct nat_entry), NULL); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index b571fee677d5..f42e4060b399 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -67,7 +67,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode) kunmap(page); f2fs_put_page(page, 0); } else { - f2fs_add_link(&dent, inode); + err = f2fs_add_link(&dent, inode); } iput(dir); out: @@ -151,7 +151,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) goto out; } - INIT_LIST_HEAD(&entry->list); list_add_tail(&entry->list, head); entry->blkaddr = blkaddr; } @@ -174,10 +173,9 @@ out: static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) { - struct list_head *this; - struct fsync_inode_entry *entry; - list_for_each(this, head) { - entry = list_entry(this, struct fsync_inode_entry, list); + struct fsync_inode_entry *entry, *tmp; + + list_for_each_entry_safe(entry, 
tmp, head, list) { iput(entry->inode); list_del(&entry->list); kmem_cache_free(fsync_entry_slab, entry); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index de6240922b0a..4b0099066582 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -31,7 +31,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi) */ if (has_not_enough_free_secs(sbi)) { mutex_lock(&sbi->gc_mutex); - f2fs_gc(sbi, 1); + f2fs_gc(sbi); } } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 08a94c814bdc..37fad04c8669 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -53,6 +53,18 @@ static match_table_t f2fs_tokens = { {Opt_err, NULL}, }; +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); + va_end(args); +} + static void init_once(void *foo) { struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; @@ -125,6 +137,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync) if (sync) write_checkpoint(sbi, false, false); + else + f2fs_balance_fs(sbi); return 0; } @@ -247,7 +261,8 @@ static const struct export_operations f2fs_export_ops = { .get_parent = f2fs_get_parent, }; -static int parse_options(struct f2fs_sb_info *sbi, char *options) +static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi, + char *options) { substring_t args[MAX_OPT_ARGS]; char *p; @@ -286,7 +301,8 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options) break; #else case Opt_nouser_xattr: - pr_info("nouser_xattr options not supported\n"); + f2fs_msg(sb, KERN_INFO, + "nouser_xattr options not supported"); break; #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL @@ -295,7 +311,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options) break; #else case Opt_noacl: - pr_info("noacl options not supported\n"); + f2fs_msg(sb, KERN_INFO, "noacl options not supported"); break; #endif case Opt_active_logs: @@ -309,8 +325,9 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options) set_opt(sbi, DISABLE_EXT_IDENTIFY); break; default: - pr_err("Unrecognized mount option \"%s\" or missing value\n", - p); + f2fs_msg(sb, KERN_ERR, + "Unrecognized mount option \"%s\" or missing value", + p); return -EINVAL; } } @@ -337,23 +354,36 @@ static loff_t max_file_size(unsigned bits) return result; } -static int sanity_check_raw_super(struct f2fs_super_block *raw_super) +static int sanity_check_raw_super(struct super_block *sb, + struct f2fs_super_block *raw_super) { unsigned int blocksize; - if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) + if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { + f2fs_msg(sb, KERN_INFO, + "Magic Mismatch, valid(0x%x) - read(0x%x)", + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); return 1; + } /* Currently, support only 4KB block size */ blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); - if (blocksize != PAGE_CACHE_SIZE) + if (blocksize != PAGE_CACHE_SIZE) { + f2fs_msg(sb, KERN_INFO, + "Invalid blocksize (%u), supports only 4KB\n", + blocksize); return 1; + } if (le32_to_cpu(raw_super->log_sectorsize) != - F2FS_LOG_SECTOR_SIZE) + F2FS_LOG_SECTOR_SIZE) { + f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize"); return 1; + } if (le32_to_cpu(raw_super->log_sectors_per_block) != - F2FS_LOG_SECTORS_PER_BLOCK) + F2FS_LOG_SECTORS_PER_BLOCK) { + f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block"); return 1; + } return 0; } @@ -413,14 +443,17 @@ static int f2fs_fill_super(struct super_block *sb, 
void *data, int silent) if (!sbi) return -ENOMEM; - /* set a temporary block size */ - if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) + /* set a block size */ + if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) { + f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); goto free_sbi; + } /* read f2fs raw super block */ raw_super_buf = sb_bread(sb, 0); if (!raw_super_buf) { err = -EIO; + f2fs_msg(sb, KERN_ERR, "unable to read superblock"); goto free_sbi; } raw_super = (struct f2fs_super_block *) @@ -438,12 +471,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) set_opt(sbi, POSIX_ACL); #endif /* parse mount options */ - if (parse_options(sbi, (char *)data)) + if (parse_options(sb, sbi, (char *)data)) goto free_sb_buf; /* sanity checking of raw super */ - if (sanity_check_raw_super(raw_super)) + if (sanity_check_raw_super(sb, raw_super)) { + f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem"); goto free_sb_buf; + } sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); sb->s_max_links = F2FS_LINK_MAX; @@ -477,18 +512,23 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) /* get an inode for meta space */ sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); if (IS_ERR(sbi->meta_inode)) { + f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); err = PTR_ERR(sbi->meta_inode); goto free_sb_buf; } err = get_valid_checkpoint(sbi); - if (err) + if (err) { + f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); goto free_meta_inode; + } /* sanity checking of checkpoint */ err = -EINVAL; - if (sanity_check_ckpt(raw_super, sbi->ckpt)) + if (sanity_check_ckpt(raw_super, sbi->ckpt)) { + f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint"); goto free_cp; + } sbi->total_valid_node_count = le32_to_cpu(sbi->ckpt->valid_node_count); @@ -502,25 +542,28 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) INIT_LIST_HEAD(&sbi->dir_inode_list); spin_lock_init(&sbi->dir_inode_lock); - /* init super block */ - if (!sb_set_blocksize(sb, sbi->blocksize)) - goto free_cp; - init_orphan_info(sbi); /* setup f2fs internal modules */ err = build_segment_manager(sbi); - if (err) + if (err) { + f2fs_msg(sb, KERN_ERR, + "Failed to initialize F2FS segment manager"); goto free_sm; + } err = build_node_manager(sbi); - if (err) + if (err) { + f2fs_msg(sb, KERN_ERR, + "Failed to initialize F2FS node manager"); goto free_nm; + } build_gc_manager(sbi); /* get an inode for node space */ sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); if (IS_ERR(sbi->node_inode)) { + f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); err = PTR_ERR(sbi->node_inode); goto free_nm; } @@ -533,6 +576,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) /* read root inode and dentry */ root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); if (IS_ERR(root)) { + f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); err = PTR_ERR(root); goto free_node_inode; } @@ -596,7 +640,7 @@ static struct file_system_type f2fs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static int init_inodecache(void) +static int __init init_inodecache(void) { f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", sizeof(struct f2fs_inode_info), NULL); @@ -631,14 +675,17 @@ static int __init init_f2fs_fs(void) err = create_checkpoint_caches(); if (err) goto fail; - return register_filesystem(&f2fs_fs_type); + err = register_filesystem(&f2fs_fs_type); + if (err) + goto fail; + f2fs_create_root_stats(); fail: return err; } static void __exit 
exit_f2fs_fs(void) { - destroy_root_stats(); + f2fs_destroy_root_stats(); unregister_filesystem(&f2fs_fs_type); destroy_checkpoint_caches(); destroy_gc_caches(); diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 940136a3d3a6..8038c0496504 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -318,6 +318,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name, if (name_len > 255 || value_len > MAX_VALUE_LEN) return -ERANGE; + f2fs_balance_fs(sbi); + mutex_lock_op(sbi, NODE_NEW); if (!fi->i_xattr_nid) { /* Allocate new attribute block */ diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 0cf160a94eda..1b2f6c2c3aaf 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -4,12 +4,24 @@ config FUSE_FS With FUSE it is possible to implement a fully functional filesystem in a userspace program. - There's also companion library: libfuse. This library along with - utilities is available from the FUSE homepage: + There's also a companion library: libfuse2. This library is available + from the FUSE homepage: <http://fuse.sourceforge.net/> + although chances are your distribution already has that library + installed if you've installed the "fuse" package itself. See <file:Documentation/filesystems/fuse.txt> for more information. See <file:Documentation/Changes> for needed library/utility version. If you want to develop a userspace FS, or if you want to use a filesystem based on FUSE, answer Y or M. + +config CUSE + tristate "Character device in Userspace support" + depends on FUSE_FS + help + This FUSE extension allows character devices to be + implemented in userspace. + + If you want to develop or use a userspace character device + based on CUSE, answer Y or M. diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index ee8d55042298..e397b675b029 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -45,7 +45,6 @@ #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/stat.h> #include <linux/module.h> @@ -63,7 +62,7 @@ struct cuse_conn { bool unrestricted_ioctl; }; -static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ +static DEFINE_MUTEX(cuse_lock); /* protects registration */ static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; static struct class *cuse_class; @@ -114,14 +113,14 @@ static int cuse_open(struct inode *inode, struct file *file) int rc; /* look up and get the connection */ - spin_lock(&cuse_lock); + mutex_lock(&cuse_lock); list_for_each_entry(pos, cuse_conntbl_head(devt), list) if (pos->dev->devt == devt) { fuse_conn_get(&pos->fc); cc = pos; break; } - spin_unlock(&cuse_lock); + mutex_unlock(&cuse_lock); /* dead? 
*/ if (!cc) @@ -267,7 +266,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) { char *end = p + len; - char *key, *val; + char *uninitialized_var(key), *uninitialized_var(val); int rc; while (true) { @@ -305,14 +304,14 @@ static void cuse_gendev_release(struct device *dev) */ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { - struct cuse_conn *cc = fc_to_cc(fc); + struct cuse_conn *cc = fc_to_cc(fc), *pos; struct cuse_init_out *arg = req->out.args[0].value; struct page *page = req->pages[0]; struct cuse_devinfo devinfo = { }; struct device *dev; struct cdev *cdev; dev_t devt; - int rc; + int rc, i; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { @@ -356,15 +355,24 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) dev_set_drvdata(dev, cc); dev_set_name(dev, "%s", devinfo.name); + mutex_lock(&cuse_lock); + + /* make sure the device-name is unique */ + for (i = 0; i < CUSE_CONNTBL_LEN; ++i) { + list_for_each_entry(pos, &cuse_conntbl[i], list) + if (!strcmp(dev_name(pos->dev), dev_name(dev))) + goto err_unlock; + } + rc = device_add(dev); if (rc) - goto err_device; + goto err_unlock; /* register cdev */ rc = -ENOMEM; cdev = cdev_alloc(); if (!cdev) - goto err_device; + goto err_unlock; cdev->owner = THIS_MODULE; cdev->ops = &cuse_frontend_fops; @@ -377,9 +385,8 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) cc->cdev = cdev; /* make the device available */ - spin_lock(&cuse_lock); list_add(&cc->list, cuse_conntbl_head(devt)); - spin_unlock(&cuse_lock); + mutex_unlock(&cuse_lock); /* announce device availability */ dev_set_uevent_suppress(dev, 0); @@ -391,7 +398,8 @@ out: err_cdev: cdev_del(cdev); -err_device: +err_unlock: + mutex_unlock(&cuse_lock); put_device(dev); err_region: unregister_chrdev_region(devt, 1); @@ -520,9 +528,9 @@ static int cuse_channel_release(struct inode *inode, struct file *file) int rc; /* remove from the conntbl, no more access from this point on */ - spin_lock(&cuse_lock); + mutex_lock(&cuse_lock); list_del_init(&cc->list); - spin_unlock(&cuse_lock); + mutex_unlock(&cuse_lock); /* remove device */ if (cc->dev) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index c16335315e5d..e83351aa5bad 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -692,8 +692,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) struct page *oldpage = *pagep; struct page *newpage; struct pipe_buffer *buf = cs->pipebufs; - struct address_space *mapping; - pgoff_t index; unlock_request(cs->fc, cs->req); fuse_copy_finish(cs); @@ -724,9 +722,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) if (fuse_check_page(newpage) != 0) goto out_fallback_unlock; - mapping = oldpage->mapping; - index = oldpage->index; - /* * This is a new and locked page, it shouldn't be mapped or * have any special flags on it diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e21d4d8f87e3..f3ab824fa302 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2177,8 +2177,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, return ret; } -long fuse_file_fallocate(struct file *file, int mode, loff_t offset, - loff_t length) +static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, + loff_t length) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; @@ -2213,7 +2213,6 @@ 
long fuse_file_fallocate(struct file *file, int mode, loff_t offset, return err; } -EXPORT_SYMBOL_GPL(fuse_file_fallocate); static const struct file_operations fuse_file_operations = { .llseek = fuse_file_llseek, diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 701beab27aab..5cf680a98f9b 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn) return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); } -static inline unsigned long my_zero_pfn(unsigned long addr) -{ - return page_to_pfn(ZERO_PAGE(addr)); -} +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + #else static inline int is_zero_pfn(unsigned long pfn) { diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index 58f466ff00d3..1db51b8524e9 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h @@ -21,10 +21,12 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long fd, off_t pgoff); #endif +#ifndef CONFIG_GENERIC_SIGALTSTACK #ifndef sys_sigaltstack asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, struct pt_regs *); #endif +#endif #ifndef sys_rt_sigreturn asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); diff --git a/include/linux/ata.h b/include/linux/ata.h index 408da9502177..8f7a3d68371a 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -297,10 +297,12 @@ enum { ATA_LOG_SATA_NCQ = 0x10, ATA_LOG_SATA_ID_DEV_DATA = 0x30, ATA_LOG_SATA_SETTINGS = 0x08, - ATA_LOG_DEVSLP_MDAT = 0x30, + ATA_LOG_DEVSLP_OFFSET = 0x30, + ATA_LOG_DEVSLP_SIZE = 0x08, + ATA_LOG_DEVSLP_MDAT = 0x00, ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, - ATA_LOG_DEVSLP_DETO = 0x31, - ATA_LOG_DEVSLP_VALID = 0x37, + ATA_LOG_DEVSLP_DETO = 0x01, + ATA_LOG_DEVSLP_VALID = 0x07, ATA_LOG_DEVSLP_VALID_MASK = 0x80, /* READ/WRITE LONG (obsolete) */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 83ba0ab2c915..649e5f86b5f0 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -652,8 +652,8 @@ struct ata_device { u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ }; - /* Identify Device Data Log (30h), SATA Settings (page 08h) */ - u8 sata_settings[ATA_SECT_SIZE]; + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; /* error history */ int spdn_cnt; diff --git a/include/linux/module.h b/include/linux/module.h index 7760c6d344a3..1375ee3f03aa 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -199,11 +199,11 @@ struct module_use { struct module *source, *target; }; -enum module_state -{ - MODULE_STATE_LIVE, - MODULE_STATE_COMING, - MODULE_STATE_GOING, +enum module_state { + MODULE_STATE_LIVE, /* Normal state. */ + MODULE_STATE_COMING, /* Full formed, running module_init. */ + MODULE_STATE_GOING, /* Going away. */ + MODULE_STATE_UNFORMED, /* Still setting it up. 
*/ }; /** diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 1693775ecfe8..89573a33ab3c 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request, extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern void ptrace_disable(struct task_struct *); -extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); diff --git a/include/linux/sched.h b/include/linux/sched.h index 6fc8f45de4e9..d2112477ff5e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2714,7 +2714,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig) extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); -extern void signal_wake_up(struct task_struct *t, int resume_stopped); +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} /* * Wrappers for p->thread_info->cpu access. No-op on UP. diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 78f99d97475b..2c6c85f18ea0 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -50,7 +50,8 @@ #define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ -#define PORT_MAX_8250 24 /* max port ID */ +#define PORT_BRCM_TRUMANAGE 24 +#define PORT_MAX_8250 25 /* max port ID */ /* * ARM specific type numbers. These are not currently guaranteed diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 5e4ded51788e..f9acf71b9810 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -36,6 +36,10 @@ __setup("noinitrd", no_initrd); static int init_linuxrc(struct subprocess_info *info, struct cred *new) { sys_unshare(CLONE_FS | CLONE_FILES); + /* stdin/stdout/stderr for /linuxrc */ + sys_open("/dev/console", O_RDWR, 0); + sys_dup(0); + sys_dup(0); /* move initrd over / and chdir/chroot in initrd root */ sys_chdir("/root"); sys_mount(".", "/", NULL, MS_MOVE, NULL); diff --git a/init/main.c b/init/main.c index 85d69dffe864..92d728a32d51 100644 --- a/init/main.c +++ b/init/main.c @@ -802,7 +802,7 @@ static int run_init_process(const char *init_filename) (const char __user *const __user *)envp_init); } -static void __init kernel_init_freeable(void); +static noinline void __init kernel_init_freeable(void); static int __ref kernel_init(void *unused) { @@ -845,7 +845,7 @@ static int __ref kernel_init(void *unused) "See Linux Documentation/init.txt for guidance."); } -static void __init kernel_init_freeable(void) +static noinline void __init kernel_init_freeable(void) { /* * Wait until kthreadd is all set-up. 
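The init/do_mounts_initrd.c hunk above gives /linuxrc working stdin/stdout/stderr by opening /dev/console and dup()ing the result twice; it works because the kernel helper starts with no file descriptors open, so the open() lands on fd 0. For reference, a minimal userspace sketch of the same idiom (the wire_std_fds() helper and its use of dup2() are illustrative, not part of the patch):

    #include <fcntl.h>
    #include <unistd.h>

    /*
     * Userspace analogue of the fd bootstrap done for /linuxrc:
     * init_linuxrc() relies on fd 0 being free so sys_open() returns 0
     * and the two sys_dup(0) calls yield 1 and 2. dup2() is used here
     * instead so the sketch works regardless of which fds are open.
     */
    static int wire_std_fds(const char *console)
    {
    	int fd = open(console, O_RDWR);

    	if (fd < 0)
    		return -1;
    	dup2(fd, 0);	/* stdin  */
    	dup2(fd, 1);	/* stdout */
    	dup2(fd, 2);	/* stderr */
    	if (fd > 2)
    		close(fd);
    	return 0;
    }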
diff --git a/kernel/async.c b/kernel/async.c index a1d585c351d6..6f34904a0b53 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -86,18 +86,27 @@ static atomic_t entry_count; */ static async_cookie_t __lowest_in_progress(struct async_domain *running) { + async_cookie_t first_running = next_cookie; /* infinity value */ + async_cookie_t first_pending = next_cookie; /* ditto */ struct async_entry *entry; + /* + * Both running and pending lists are sorted but not disjoint. + * Take the first cookies from both and return the min. + */ if (!list_empty(&running->domain)) { entry = list_first_entry(&running->domain, typeof(*entry), list); - return entry->cookie; + first_running = entry->cookie; } - list_for_each_entry(entry, &async_pending, list) - if (entry->running == running) - return entry->cookie; + list_for_each_entry(entry, &async_pending, list) { + if (entry->running == running) { + first_pending = entry->cookie; + break; + } + } - return next_cookie; /* "infinity" value */ + return min(first_running, first_pending); } static async_cookie_t lowest_in_progress(struct async_domain *running) @@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work) { struct async_entry *entry = container_of(work, struct async_entry, work); + struct async_entry *pos; unsigned long flags; ktime_t uninitialized_var(calltime), delta, rettime; struct async_domain *running = entry->running; - /* 1) move self to the running queue */ + /* 1) move self to the running queue, make sure it stays sorted */ spin_lock_irqsave(&async_lock, flags); - list_move_tail(&entry->list, &running->domain); + list_for_each_entry_reverse(pos, &running->domain, list) + if (entry->cookie < pos->cookie) + break; + list_move_tail(&entry->list, &pos->list); spin_unlock_irqrestore(&async_lock, flags); /* 2) run (and print duration) */ diff --git a/kernel/compat.c b/kernel/compat.c index f6150e92dfc9..36700e9e2be9 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru) return 0; } -asmlinkage long -compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, - struct compat_rusage __user *ru) +COMPAT_SYSCALL_DEFINE4(wait4, + compat_pid_t, pid, + compat_uint_t __user *, stat_addr, + int, options, + struct compat_rusage __user *, ru) { if (!ru) { return sys_wait4(pid, stat_addr, options, NULL); @@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, } } -asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, - struct compat_siginfo __user *uinfo, int options, - struct compat_rusage __user *uru) +COMPAT_SYSCALL_DEFINE5(waitid, + int, which, compat_pid_t, pid, + struct compat_siginfo __user *, uinfo, int, options, + struct compat_rusage __user *, uru) { siginfo_t info; struct rusage ru; @@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, return ret; if (uru) { - ret = put_compat_rusage(&ru, uru); + /* sys_waitid() overwrites everything in ru */ + if (COMPAT_USE_64BIT_TIME) + ret = copy_to_user(uru, &ru, sizeof(ru)); + else + ret = put_compat_rusage(&ru, uru); if (ret) return ret; } @@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, sigset_from_compat(&s, &s32); if (uts) { - if (get_compat_timespec(&t, uts)) + if (compat_get_timespec(&t, uts)) return -EFAULT; } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 4d5f8d5612f3..8875254120b6 100644 --- 
a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv) kdb_printf("Module Size modstruct Used by\n"); list_for_each_entry(mod, kdb_modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; kdb_printf("%-20s%8u 0x%p ", mod->name, mod->core_size, (void *)mod); diff --git a/kernel/fork.c b/kernel/fork.c index 65ca6d27f24e..c535f33bbb9c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1668,8 +1668,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int, tls_val) #endif { - return do_fork(clone_flags, newsp, 0, - parent_tidptr, child_tidptr); + long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr); + asmlinkage_protect(5, ret, clone_flags, newsp, + parent_tidptr, child_tidptr, tls_val); + return ret; } #endif diff --git a/kernel/module.c b/kernel/module.c index b10b048367e1..eab08274ec9b 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -188,6 +188,7 @@ struct load_info { ongoing or failed initialization etc. */ static inline int strong_try_module_get(struct module *mod) { + BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); if (mod && mod->state == MODULE_STATE_COMING) return -EBUSY; if (try_module_get(mod)) @@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, #endif }; + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) return true; } @@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name, EXPORT_SYMBOL_GPL(find_symbol); /* Search for module by name: must hold module_mutex. */ -struct module *find_module(const char *name) +static struct module *find_module_all(const char *name, + bool even_unformed) { struct module *mod; list_for_each_entry(mod, &modules, list) { + if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) + continue; if (strcmp(mod->name, name) == 0) return mod; } return NULL; } + +struct module *find_module(const char *name) +{ + return find_module_all(name, false); +} EXPORT_SYMBOL_GPL(find_module); #ifdef CONFIG_SMP @@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr) preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (!mod->percpu_size) continue; for_each_possible_cpu(cpu) { @@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr, case MODULE_STATE_GOING: state = "going"; break; + default: + BUG(); } return sprintf(buffer, "%s\n", state); } @@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void) mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if ((mod->module_core) && (mod->core_text_size)) { set_page_attributes(mod->module_core, mod->module_core + mod->core_text_size, @@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void) mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if ((mod->module_core) && (mod->core_text_size)) { set_page_attributes(mod->module_core, mod->module_core + mod->core_text_size, @@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info) err = -EFBIG; goto out; } + + /* Don't hand 0 to vmalloc, it whines. 
*/ + if (stat.size == 0) { + err = -EINVAL; + goto out; + } + info->hdr = vmalloc(stat.size); if (!info->hdr) { err = -ENOMEM; @@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name) bool ret; mutex_lock(&module_mutex); - mod = find_module(name); - ret = !mod || mod->state != MODULE_STATE_COMING; + mod = find_module_all(name, true); + ret = !mod || mod->state == MODULE_STATE_LIVE + || mod->state == MODULE_STATE_GOING; mutex_unlock(&module_mutex); return ret; @@ -3136,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs, goto free_copy; } + /* + * We try to place it in the list now to make sure it's unique + * before we dedicate too many resources. In particular, + * temporary percpu memory exhaustion. + */ + mod->state = MODULE_STATE_UNFORMED; +again: + mutex_lock(&module_mutex); + if ((old = find_module_all(mod->name, true)) != NULL) { + if (old->state == MODULE_STATE_COMING + || old->state == MODULE_STATE_UNFORMED) { + /* Wait in case it fails to load. */ + mutex_unlock(&module_mutex); + err = wait_event_interruptible(module_wq, + finished_loading(mod->name)); + if (err) + goto free_module; + goto again; + } + err = -EEXIST; + mutex_unlock(&module_mutex); + goto free_module; + } + list_add_rcu(&mod->list, &modules); + mutex_unlock(&module_mutex); + #ifdef CONFIG_MODULE_SIG mod->sig_ok = info->sig_ok; if (!mod->sig_ok) @@ -3145,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs, /* Now module is in final location, initialize linked lists, etc. */ err = module_unload_init(mod); if (err) - goto free_module; + goto unlink_mod; /* Now we've got everything in the final locations, we can * find optional sections. */ @@ -3180,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs, goto free_arch_cleanup; } - /* Mark state as coming so strong_try_module_get() ignores us. */ - mod->state = MODULE_STATE_COMING; - - /* Now sew it into the lists so we can get lockdep and oops - * info during argument parsing. No one should access us, since - * strong_try_module_get() will fail. - * lockdep/oops can run asynchronous, so use the RCU list insertion - * function to insert in a way safe to concurrent readers. - * The mutex protects against concurrent writers. - */ -again: - mutex_lock(&module_mutex); - if ((old = find_module(mod->name)) != NULL) { - if (old->state == MODULE_STATE_COMING) { - /* Wait in case it fails to load. */ - mutex_unlock(&module_mutex); - err = wait_event_interruptible(module_wq, - finished_loading(mod->name)); - if (err) - goto free_arch_cleanup; - goto again; - } - err = -EEXIST; - goto unlock; - } - - /* This has to be done once we're sure module name is unique. */ dynamic_debug_setup(info->debug, info->num_debug); - /* Find duplicate symbols */ + mutex_lock(&module_mutex); + /* Find duplicate symbols (must be called under lock). */ err = verify_export_symbols(mod); if (err < 0) - goto ddebug; + goto ddebug_cleanup; + /* This relies on module_mutex for list integrity. */ module_bug_finalize(info->hdr, info->sechdrs, mod); - list_add_rcu(&mod->list, &modules); + + /* Mark state as coming so strong_try_module_get() ignores us, + * but kallsyms etc. can see us. */ + mod->state = MODULE_STATE_COMING; + mutex_unlock(&module_mutex); /* Module is ready to execute: parsing args may do that. */ err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, -32768, 32767, &ddebug_dyndbg_module_param_cb); if (err < 0) - goto unlink; + goto bug_cleanup; /* Link in to syfs. 
*/ err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); if (err < 0) - goto unlink; + goto bug_cleanup; /* Get rid of temporary copy. */ free_copy(info); @@ -3237,16 +3270,13 @@ again: return do_init_module(mod); - unlink: + bug_cleanup: + /* module_bug_cleanup needs module_mutex protection */ mutex_lock(&module_mutex); - /* Unlink carefully: kallsyms could be walking list. */ - list_del_rcu(&mod->list); module_bug_cleanup(mod); - wake_up_all(&module_wq); - ddebug: - dynamic_debug_remove(info->debug); - unlock: + ddebug_cleanup: mutex_unlock(&module_mutex); + dynamic_debug_remove(info->debug); synchronize_sched(); kfree(mod->args); free_arch_cleanup: @@ -3255,6 +3285,12 @@ again: free_modinfo(mod); free_unload: module_unload_free(mod); + unlink_mod: + mutex_lock(&module_mutex); + /* Unlink carefully: kallsyms could be walking list. */ + list_del_rcu(&mod->list); + wake_up_all(&module_wq); + mutex_unlock(&module_mutex); free_module: module_deallocate(mod, info); free_copy: @@ -3377,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr, preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_init(addr, mod) || within_module_core(addr, mod)) { if (modname) @@ -3400,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname) preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_init(addr, mod) || within_module_core(addr, mod)) { const char *sym; @@ -3424,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_init(addr, mod) || within_module_core(addr, mod)) { const char *sym; @@ -3451,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (symnum < mod->num_symtab) { *value = mod->symtab[symnum].st_value; *type = mod->symtab[symnum].st_info; @@ -3493,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name) ret = mod_find_symname(mod, colon+1); *colon = ':'; } else { - list_for_each_entry_rcu(mod, &modules, list) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if ((ret = mod_find_symname(mod, name)) != 0) break; + } } preempt_enable(); return ret; @@ -3510,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, int ret; list_for_each_entry(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; for (i = 0; i < mod->num_symtab; i++) { ret = fn(data, mod->strtab + mod->symtab[i].st_name, mod, mod->symtab[i].st_value); @@ -3525,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf) { int bx = 0; + BUG_ON(mod->state == MODULE_STATE_UNFORMED); if (mod->taints || mod->state == MODULE_STATE_GOING || mod->state == MODULE_STATE_COMING) { @@ -3566,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p) struct module *mod = list_entry(p, struct module, list); char buf[8]; + /* We always ignore unformed modules. 
*/ + if (mod->state == MODULE_STATE_UNFORMED) + return 0; + seq_printf(m, "%s %u", mod->name, mod->init_size + mod->core_size); print_unload_info(m, mod); @@ -3626,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (mod->num_exentries == 0) continue; @@ -3674,10 +3730,13 @@ struct module *__module_address(unsigned long addr) if (addr < module_addr_min || addr > module_addr_max) return NULL; - list_for_each_entry_rcu(mod, &modules, list) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_core(addr, mod) || within_module_init(addr, mod)) return mod; + } return NULL; } EXPORT_SYMBOL_GPL(__module_address); @@ -3730,8 +3789,11 @@ void print_modules(void) printk(KERN_DEFAULT "Modules linked in:"); /* Most callers should already have preempt disabled, but make sure */ preempt_disable(); - list_for_each_entry_rcu(mod, &modules, list) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; printk(" %s%s", mod->name, module_flags(mod, buf)); + } preempt_enable(); if (last_unloaded_module[0]) printk(" [last unloaded: %s]", last_unloaded_module); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1599157336a6..6cbeaae4406d 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child) * TASK_KILLABLE sleeps. */ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) - signal_wake_up(child, task_is_traced(child)); + ptrace_signal_wake_up(child, true); spin_unlock(&child->sighand->siglock); } +/* Ensure that nothing can wake it up, even SIGKILL */ +static bool ptrace_freeze_traced(struct task_struct *task) +{ + bool ret = false; + + /* Lockless, nobody but us can set this flag */ + if (task->jobctl & JOBCTL_LISTENING) + return ret; + + spin_lock_irq(&task->sighand->siglock); + if (task_is_traced(task) && !__fatal_signal_pending(task)) { + task->state = __TASK_TRACED; + ret = true; + } + spin_unlock_irq(&task->sighand->siglock); + + return ret; +} + +static void ptrace_unfreeze_traced(struct task_struct *task) +{ + if (task->state != __TASK_TRACED) + return; + + WARN_ON(!task->ptrace || task->parent != current); + + spin_lock_irq(&task->sighand->siglock); + if (__fatal_signal_pending(task)) + wake_up_state(task, __TASK_TRACED); + else + task->state = TASK_TRACED; + spin_unlock_irq(&task->sighand->siglock); +} + /** * ptrace_check_attach - check whether ptracee is ready for ptrace operation * @child: ptracee to check for @@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child) * RETURNS: * 0 on success, -ESRCH if %child is not ready. */ -int ptrace_check_attach(struct task_struct *child, bool ignore_state) +static int ptrace_check_attach(struct task_struct *child, bool ignore_state) { int ret = -ESRCH; @@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state) * be changed by us so it's not changing right after this. */ read_lock(&tasklist_lock); - if ((child->ptrace & PT_PTRACED) && child->parent == current) { + if (child->ptrace && child->parent == current) { + WARN_ON(child->state == __TASK_TRACED); /* * child->sighand can't be NULL, release_task() * does ptrace_unlink() before __exit_signal(). 
*/ - spin_lock_irq(&child->sighand->siglock); - WARN_ON_ONCE(task_is_stopped(child)); - if (ignore_state || (task_is_traced(child) && - !(child->jobctl & JOBCTL_LISTENING))) + if (ignore_state || ptrace_freeze_traced(child)) ret = 0; - spin_unlock_irq(&child->sighand->siglock); } read_unlock(&tasklist_lock); - if (!ret && !ignore_state) - ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; + if (!ret && !ignore_state) { + if (!wait_task_inactive(child, __TASK_TRACED)) { + /* + * This can only happen if may_ptrace_stop() fails and + * ptrace_stop() changes ->state back to TASK_RUNNING, + * so we should not worry about leaking __TASK_TRACED. + */ + WARN_ON(child->state == __TASK_TRACED); + ret = -ESRCH; + } + } - /* All systems go.. */ return ret; } @@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request, */ if (task_is_stopped(task) && task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) - signal_wake_up(task, 1); + signal_wake_up_state(task, __TASK_STOPPED); spin_unlock(&task->sighand->siglock); @@ -737,7 +776,7 @@ int ptrace_request(struct task_struct *child, long request, * tracee into STOP. */ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) - signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); + ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); unlock_task_sighand(child, &flags); ret = 0; @@ -763,7 +802,7 @@ int ptrace_request(struct task_struct *child, long request, * start of this trap and now. Trigger re-trap. */ if (child->jobctl & JOBCTL_TRAP_NOTIFY) - signal_wake_up(child, true); + ptrace_signal_wake_up(child, true); ret = 0; } unlock_task_sighand(child, &flags); @@ -900,6 +939,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, goto out_put_task_struct; ret = arch_ptrace(child, request, addr, data); + if (ret || request != PTRACE_DETACH) + ptrace_unfreeze_traced(child); out_put_task_struct: put_task_struct(child); @@ -1039,8 +1080,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, ret = ptrace_check_attach(child, request == PTRACE_KILL || request == PTRACE_INTERRUPT); - if (!ret) + if (!ret) { ret = compat_arch_ptrace(child, request, addr, data); + if (ret || request != PTRACE_DETACH) + ptrace_unfreeze_traced(child); + } out_put_task_struct: put_task_struct(child); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 257002c13bb0..26058d0bebba 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1523,7 +1523,8 @@ out: */ int wake_up_process(struct task_struct *p) { - return try_to_wake_up(p, TASK_ALL, 0); + WARN_ON(task_is_stopped_or_traced(p)); + return try_to_wake_up(p, TASK_NORMAL, 0); } EXPORT_SYMBOL(wake_up_process); diff --git a/kernel/signal.c b/kernel/signal.c index 372771e948c2..3d09cf6cde75 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * No need to set need_resched since signal event passing * goes through ->blocked */ -void signal_wake_up(struct task_struct *t, int resume) +void signal_wake_up_state(struct task_struct *t, unsigned int state) { - unsigned int mask; - set_tsk_thread_flag(t, TIF_SIGPENDING); - /* - * For SIGKILL, we want to wake it up in the stopped/traced/killable + * TASK_WAKEKILL also means wake it up in the stopped/traced/killable * case. We don't check t->state here because there is a race with it * executing another processor and just now entering stopped state. 
* By using wake_up_state, we ensure the process will wake up and * handle its death signal. */ - mask = TASK_INTERRUPTIBLE; - if (resume) - mask |= TASK_WAKEKILL; - if (!wake_up_state(t, mask)) + if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) kick_process(t); } @@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t) assert_spin_locked(&t->sighand->siglock); task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); - signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); + ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); } /* @@ -1800,6 +1794,10 @@ static inline int may_ptrace_stop(void) * If SIGKILL was already sent before the caller unlocked * ->siglock we must see ->core_state != NULL. Otherwise it * is safe to enter schedule(). + * + * This is almost outdated, a task with the pending SIGKILL can't + * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported + * after SIGKILL was already dequeued. */ if (unlikely(current->mm->core_state) && unlikely(current->mm == current->parent->mm)) @@ -1925,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) if (gstop_done) do_notify_parent_cldstop(current, false, why); + /* tasklist protects us from ptrace_freeze_traced() */ __set_current_state(TASK_RUNNING); if (clear_code) current->exit_code = 0; @@ -3116,8 +3115,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp) #ifdef CONFIG_COMPAT #ifdef CONFIG_GENERIC_SIGALTSTACK -asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, - compat_stack_t __user *uoss_ptr) +COMPAT_SYSCALL_DEFINE2(sigaltstack, + const compat_stack_t __user *, uss_ptr, + compat_stack_t __user *, uoss_ptr) { stack_t uss, uoss; int ret; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3ffe4c5ad3f3..41473b4ad7a4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3998,7 +3998,7 @@ static int ftrace_module_notify(struct notifier_block *self, struct notifier_block ftrace_module_nb = { .notifier_call = ftrace_module_notify, - .priority = 0, + .priority = INT_MAX, /* Run before anything that can use kprobes */ }; extern unsigned long __start_mcount_loc[]; diff --git a/lib/bug.c b/lib/bug.c index a28c1415357c..d0cdf14c651a 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug) } #ifdef CONFIG_MODULES +/* Updates are protected by module mutex */ static LIST_HEAD(module_bug_list); static const struct bug_entry *module_find_bug(unsigned long bugaddr) diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 19ecc8de9e6b..d794abcc4b3b 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -215,7 +215,9 @@ static void devcgroup_css_free(struct cgroup *cgroup) struct dev_cgroup *dev_cgroup; dev_cgroup = cgroup_to_devcgroup(cgroup); + mutex_lock(&devcgroup_mutex); dev_exception_clean(dev_cgroup); + mutex_unlock(&devcgroup_mutex); kfree(dev_cgroup); } diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index dfb26918699c..7dd538ef5b83 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, &xattr_data, sizeof(xattr_data), 0); - } - else if (rc == -ENODATA) + } else if (rc == -ENODATA && inode->i_op->removexattr) { rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); + } return rc; } diff --git a/sound/pci/hda/hda_codec.c 
b/sound/pci/hda/hda_codec.c index b8fb0a5adb9b..822df971972c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3654,6 +3654,7 @@ static void hda_call_codec_resume(struct hda_codec *codec) hda_set_power_state(codec, AC_PWRST_D0); restore_shutup_pins(codec); hda_exec_init_verbs(codec); + snd_hda_jack_set_dirty_all(codec); if (codec->patch_ops.resume) codec->patch_ops.resume(codec); else { @@ -3665,10 +3666,8 @@ static void hda_call_codec_resume(struct hda_codec *codec) if (codec->jackpoll_interval) hda_jackpoll_work(&codec->jackpoll_work.work); - else { - snd_hda_jack_set_dirty_all(codec); + else snd_hda_jack_report_sync(codec); - } codec->in_pm = 0; snd_hda_power_down(codec); /* flag down before returning */ diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index dd798c3196ff..009b77a693cf 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -4636,6 +4636,12 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = { .patch = patch_conexant_auto }, { .id = 0x14f15111, .name = "CX20753/4", .patch = patch_conexant_auto }, + { .id = 0x14f15113, .name = "CX20755", + .patch = patch_conexant_auto }, + { .id = 0x14f15114, .name = "CX20756", + .patch = patch_conexant_auto }, + { .id = 0x14f15115, .name = "CX20757", + .patch = patch_conexant_auto }, {} /* terminator */ }; @@ -4659,6 +4665,9 @@ MODULE_ALIAS("snd-hda-codec-id:14f150b9"); MODULE_ALIAS("snd-hda-codec-id:14f1510f"); MODULE_ALIAS("snd-hda-codec-id:14f15110"); MODULE_ALIAS("snd-hda-codec-id:14f15111"); +MODULE_ALIAS("snd-hda-codec-id:14f15113"); +MODULE_ALIAS("snd-hda-codec-id:14f15114"); +MODULE_ALIAS("snd-hda-codec-id:14f15115"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Conexant HD-audio codec"); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f5196277b6e9..cf3886171109 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6251,6 +6251,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x1977, "HP Pavilion 14", ALC269_FIXUP_MIC1_MUTE_LED), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), @@ -6265,6 +6266,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), + SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 80db3f4bcf7a..39d41068484f 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -11,11 +11,21 @@ lib/rbtree.c include/linux/swab.h arch/*/include/asm/unistd*.h arch/*/include/asm/perf_regs.h +arch/*/include/uapi/asm/unistd*.h +arch/*/include/uapi/asm/perf_regs.h arch/*/lib/memcpy*.S arch/*/lib/memset*.S include/linux/poison.h include/linux/magic.h 
include/linux/hw_breakpoint.h +include/linux/rbtree_augmented.h +include/uapi/linux/perf_event.h +include/uapi/linux/const.h +include/uapi/linux/swab.h +include/uapi/linux/hw_breakpoint.h arch/x86/include/asm/svm.h arch/x86/include/asm/vmx.h arch/x86/include/asm/kvm_host.h +arch/x86/include/uapi/asm/svm.h +arch/x86/include/uapi/asm/vmx.h +arch/x86/include/uapi/asm/kvm.h
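The largest change above is kernel/module.c learning MODULE_STATE_UNFORMED: a module claims its name in the global list before the expensive parts of loading run, and every walker of the list skips entries still in that state. A compact userspace sketch of the same pattern (struct obj, find_obj(), register_obj() and mark_live() are made-up names; a pthread mutex stands in for module_mutex, and the wait-for-a-racing-loader path is omitted):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    enum obj_state { OBJ_UNFORMED, OBJ_LIVE };	/* cf. MODULE_STATE_* */

    struct obj {
    	struct obj *next;
    	const char *name;
    	enum obj_state state;
    };

    static struct obj *objs;			/* cf. the modules list */
    static pthread_mutex_t objs_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Must be called with objs_lock held; readers skip half-built
     * entries just like the new MODULE_STATE_UNFORMED checks do. */
    static struct obj *find_obj(const char *name, bool even_unformed)
    {
    	struct obj *o;

    	for (o = objs; o; o = o->next) {
    		if (!even_unformed && o->state == OBJ_UNFORMED)
    			continue;
    		if (strcmp(o->name, name) == 0)
    			return o;
    	}
    	return NULL;
    }

    /* Claim the name first, finish construction later: a duplicate
     * load now fails before any expensive setup has been done. */
    static int register_obj(struct obj *o)
    {
    	pthread_mutex_lock(&objs_lock);
    	if (find_obj(o->name, true)) {
    		pthread_mutex_unlock(&objs_lock);
    		return -1;		/* the kernel returns -EEXIST */
    	}
    	o->state = OBJ_UNFORMED;
    	o->next = objs;
    	objs = o;
    	pthread_mutex_unlock(&objs_lock);
    	return 0;
    }

    /* ...heavy initialization runs here, outside the lock... */

    static void mark_live(struct obj *o)
    {
    	pthread_mutex_lock(&objs_lock);
    	o->state = OBJ_LIVE;		/* cf. COMING, then LIVE */
    	pthread_mutex_unlock(&objs_lock);
    }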