47 files changed, 264 insertions, 181 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 907de3dcf2b9..3722dc7f6b10 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1667,6 +1667,12 @@ M: Nicolas Ferre <nicolas.ferre@atmel.com> S: Supported F: drivers/tty/serial/atmel_serial.c +ATMEL Audio ALSA driver +M: Bo Shen <voice.shen@atmel.com> +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +S: Supported +F: sound/soc/atmel + ATMEL DMA DRIVER M: Nicolas Ferre <nicolas.ferre@atmel.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -4791,14 +4797,14 @@ M: Deepak Saxena <dsaxena@plexity.net> S: Maintained F: drivers/char/hw_random/ixp4xx-rng.c -INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) +INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> M: Jesse Brandeburg <jesse.brandeburg@intel.com> M: Bruce Allan <bruce.w.allan@intel.com> M: Carolyn Wyborny <carolyn.wyborny@intel.com> M: Don Skidmore <donald.c.skidmore@intel.com> M: Greg Rose <gregory.v.rose@intel.com> -M: Alex Duyck <alexander.h.duyck@intel.com> +M: Matthew Vick <matthew.vick@intel.com> M: John Ronciak <john.ronciak@intel.com> M: Mitch Williams <mitch.a.williams@intel.com> M: Linux NICS <linux.nics@intel.com> @@ -5486,7 +5492,7 @@ F: drivers/macintosh/ LINUX FOR POWERPC EMBEDDED MPC5XXX M: Anatolij Gustschin <agust@denx.de> L: linuxppc-dev@lists.ozlabs.org -T: git git://git.denx.de/linux-2.6-agust.git +T: git git://git.denx.de/linux-denx-agust.git S: Maintained F: arch/powerpc/platforms/512x/ F: arch/powerpc/platforms/52xx/ diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6e93e7f98358..61190f6b4829 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1658,10 +1658,8 @@ void cpufreq_suspend(void) if (!cpufreq_driver) return; - cpufreq_suspended = true; - if (!has_target()) - return; + goto suspend; pr_debug("%s: Suspending Governors\n", __func__); @@ -1674,6 +1672,9 @@ void cpufreq_suspend(void) pr_err("%s: Failed to suspend driver: %p\n", __func__, policy); } + +suspend: + cpufreq_suspended = true; } /** diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index c1320528b9d0..6bd69adc3c5e 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c @@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev) return cpufreq_register_driver(&integrator_driver); } -static void __exit integrator_cpufreq_remove(struct platform_device *pdev) +static int __exit integrator_cpufreq_remove(struct platform_device *pdev) { - cpufreq_unregister_driver(&integrator_driver); + return cpufreq_unregister_driver(&integrator_driver); } static const struct of_device_id integrator_cpufreq_match[] = { diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 728a2d879499..4d2c8e861089 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, u32 input_buffer; int cpu; - spin_lock(&pcc_lock); cpu = policy->cpu; pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); @@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, freqs.old = policy->cur; freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); + spin_lock(&pcc_lock); input_buffer = 0x1 | (((target_freq * 100) / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); diff --git 
a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1411613f2174..e42925f76b4b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1310,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev) POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); } +static void i915_ggtt_flush(struct drm_i915_private *dev_priv) +{ + if (INTEL_INFO(dev_priv->dev)->gen < 6) { + intel_gtt_chipset_flush(); + } else { + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + } +} + void i915_gem_suspend_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1326,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) dev_priv->gtt.base.start, dev_priv->gtt.base.total, true); + + i915_ggtt_flush(dev_priv); } void i915_gem_restore_gtt_mappings(struct drm_device *dev) @@ -1378,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); } - i915_gem_chipset_flush(dev); + i915_ggtt_flush(dev_priv); } int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index ca52ad2ae7d1..d8de1d5140a7 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) return -EINVAL; } +/* + * If the vendor backlight interface is not in use and ACPI backlight interface + * is broken, do not bother processing backlight change requests from firmware. + */ +static bool should_ignore_backlight_request(void) +{ + return acpi_video_backlight_support() && + !acpi_video_verify_backlight_support(); +} + static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); - /* - * If the acpi_video interface is not supposed to be used, don't - * bother processing backlight level change requests from firmware. 
- */ - if (!acpi_video_verify_backlight_support()) { + if (should_ignore_backlight_request()) { DRM_DEBUG_KMS("opregion backlight request ignored\n"); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 4b5bb5d58a54..f8cbb512132f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -1763,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp const int or = ffs(outp->or) - 1; const u32 loff = (or * 0x800) + (link * 0x80); const u16 mask = (outp->sorconf.link << 6) | outp->or; + struct dcb_output match; u8 ver, hdr; - if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) + if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match)) nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); } diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 99cd9e4a2aa6..3440fc999f2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) struct nouveau_software_chan *swch; struct nv_dma_v0 args = {}; int ret, i; + bool save; nvif_object_map(chan->object); @@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) } /* initialise synchronisation */ - return nouveau_fence(chan->drm)->context_new(chan); + save = cli->base.super; + cli->base.super = true; /* hack until fencenv50 fixed */ + ret = nouveau_fence(chan->drm)->context_new(chan); + cli->base.super = save; + return ret; } int diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 65b4fd53dd4e..4a21b2b06ce2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev) } int -nouveau_display_suspend(struct drm_device *dev) +nouveau_display_suspend(struct drm_device *dev, bool runtime) { - struct nouveau_drm *drm = nouveau_drm(dev); struct drm_crtc *crtc; nouveau_display_fini(dev); - NV_INFO(drm, "unpinning framebuffer(s)...\n"); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -579,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev) } void -nouveau_display_repin(struct drm_device *dev) +nouveau_display_resume(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); struct drm_crtc *crtc; - int ret; + int ret, head; + /* re-pin fb/cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -606,13 +605,6 @@ nouveau_display_repin(struct drm_device *dev) if (ret) NV_ERROR(drm, "Could not pin/map cursor.\n"); } -} - -void -nouveau_display_resume(struct drm_device *dev) -{ - struct drm_crtc *crtc; - int head; nouveau_display_init(dev); @@ -627,6 +619,13 @@ nouveau_display_resume(struct drm_device *dev) for (head = 0; head < dev->mode_config.num_crtc; head++) drm_vblank_on(dev, head); + /* This should ensure we don't hit a locking problem when someone + * wakes us up via a connector. We should never go into suspend + * while the display is on anyways. 
+ */ + if (runtime) + return; + drm_helper_resume_force_mode(dev); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 88ca177cb1c7..be3d5947c6be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -63,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev); void nouveau_display_destroy(struct drm_device *dev); int nouveau_display_init(struct drm_device *dev); void nouveau_display_fini(struct drm_device *dev); -int nouveau_display_suspend(struct drm_device *dev); -void nouveau_display_repin(struct drm_device *dev); -void nouveau_display_resume(struct drm_device *dev); +int nouveau_display_suspend(struct drm_device *dev, bool runtime); +void nouveau_display_resume(struct drm_device *dev, bool runtime); int nouveau_display_vblank_enable(struct drm_device *, int); void nouveau_display_vblank_disable(struct drm_device *, int); int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 9c3af96a7153..3ed32dd90303 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -547,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) struct nouveau_cli *cli; int ret; - if (dev->mode_config.num_crtc && !runtime) { + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "suspending console...\n"); + nouveau_fbcon_set_suspend(dev, 1); NV_INFO(drm, "suspending display...\n"); - ret = nouveau_display_suspend(dev); + ret = nouveau_display_suspend(dev, runtime); if (ret) return ret; } @@ -603,7 +605,7 @@ fail_client: fail_display: if (dev->mode_config.num_crtc) { NV_INFO(drm, "resuming display...\n"); - nouveau_display_resume(dev); + nouveau_display_resume(dev, runtime); } return ret; } @@ -618,9 +620,6 @@ int nouveau_pmops_suspend(struct device *dev) drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) return 0; - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 1); - ret = nouveau_do_suspend(drm_dev, false); if (ret) return ret; @@ -633,7 +632,7 @@ int nouveau_pmops_suspend(struct device *dev) } static int -nouveau_do_resume(struct drm_device *dev) +nouveau_do_resume(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; @@ -658,7 +657,9 @@ nouveau_do_resume(struct drm_device *dev) if (dev->mode_config.num_crtc) { NV_INFO(drm, "resuming display...\n"); - nouveau_display_repin(dev); + nouveau_display_resume(dev, runtime); + NV_INFO(drm, "resuming console...\n"); + nouveau_fbcon_set_suspend(dev, 0); } return 0; @@ -681,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev) return ret; pci_set_master(pdev); - ret = nouveau_do_resume(drm_dev); - if (ret) - return ret; - - if (drm_dev->mode_config.num_crtc) { - nouveau_display_resume(drm_dev); - nouveau_fbcon_set_suspend(drm_dev, 0); - } - - return 0; + return nouveau_do_resume(drm_dev, false); } static int nouveau_pmops_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; - - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 1); - - ret = nouveau_do_suspend(drm_dev, false); - return ret; + return nouveau_do_suspend(drm_dev, false); } static int nouveau_pmops_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device 
*drm_dev = pci_get_drvdata(pdev); - int ret; - - ret = nouveau_do_resume(drm_dev); - if (ret) - return ret; - - if (drm_dev->mode_config.num_crtc) { - nouveau_display_resume(drm_dev); - nouveau_fbcon_set_suspend(drm_dev, 0); - } - - return 0; + return nouveau_do_resume(drm_dev, false); } @@ -977,7 +952,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev) return ret; pci_set_master(pdev); - ret = nouveau_do_resume(drm_dev); + ret = nouveau_do_resume(drm_dev, true); drm_kms_helper_poll_enable(drm_dev); /* do magic */ nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 8bdd27091db8..49fe6075cc7c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -486,6 +486,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .fb_probe = nouveau_fbcon_create, }; +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work); + console_lock(); + nouveau_fbcon_accel_restore(fbcon->dev); + nouveau_fbcon_zfill(fbcon->dev, fbcon); + fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING); + console_unlock(); +} int nouveau_fbcon_init(struct drm_device *dev) @@ -503,6 +513,7 @@ nouveau_fbcon_init(struct drm_device *dev) if (!fbcon) return -ENOMEM; + INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work); fbcon->dev = dev; drm->fbcon = fbcon; @@ -551,14 +562,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct nouveau_drm *drm = nouveau_drm(dev); if (drm->fbcon) { - console_lock(); - if (state == 0) { - nouveau_fbcon_accel_restore(dev); - nouveau_fbcon_zfill(dev, drm->fbcon); + if (state == FBINFO_STATE_RUNNING) { + schedule_work(&drm->fbcon->work); + return; } + flush_work(&drm->fbcon->work); + console_lock(); fb_set_suspend(drm->fbcon->helper.fbdev, state); - if (state == 1) - nouveau_fbcon_accel_save_disable(dev); + nouveau_fbcon_accel_save_disable(dev); console_unlock(); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 34658cfa8f5d..0b465c7d3907 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -36,6 +36,7 @@ struct nouveau_fbdev { struct nouveau_framebuffer nouveau_fb; struct list_head fbdev_list; struct drm_device *dev; + struct work_struct work; unsigned int saved_flags; struct nvif_object surf2d; struct nvif_object clip; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 183588b11fc1..9f0fbecd1eb5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -64,6 +64,10 @@ #define cpu_to_group(cpu) cpu_to_node(cpu) #define ANY_GROUP NUMA_NO_NODE +static bool devices_handle_discard_safely = false; +module_param(devices_handle_discard_safely, bool, 0644); +MODULE_PARM_DESC(devices_handle_discard_safely, + "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); static struct workqueue_struct *raid5_wq; /* * Stripe cache @@ -6208,7 +6212,7 @@ static int run(struct mddev *mddev) mddev->queue->limits.discard_granularity = stripe; /* * unaligned part of discard request will be ignored, so can't - * guarantee discard_zerors_data + * guarantee discard_zeroes_data */ mddev->queue->limits.discard_zeroes_data = 0; @@ -6233,6 +6237,18 @@ static int run(struct mddev *mddev) !bdev_get_queue(rdev->bdev)-> limits.discard_zeroes_data) discard_supported = false; + /* Unfortunately, 
discard_zeroes_data is not currently + * a guarantee - just a hint. So we only allow DISCARD + * if the sysadmin has confirmed that only safe devices + * are in use by setting a module parameter. + */ + if (!devices_handle_discard_safely) { + if (discard_supported) { + pr_info("md/raid456: discard support disabled due to uncertainty.\n"); + pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); + } + discard_supported = false; + } } if (discard_supported && diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index a7e24848f6c8..9da812b8a786 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = { .disconnect = em28xx_usb_disconnect, .suspend = em28xx_usb_suspend, .resume = em28xx_usb_resume, + .reset_resume = em28xx_usb_resume, .id_table = em28xx_id_table, }; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 86e621142d5b..41095ebad97f 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2213,7 +2213,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #else - dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE); if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) goto out_dma_err; vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 77f1ff7396ac..075688188644 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -857,7 +857,8 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) +static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, + struct net_device *dev) { struct sk_buff *nskb; struct bcm_tsb *tsb; @@ -873,7 +874,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) if (!nskb) { dev->stats.tx_errors++; dev->stats.tx_dropped++; - return -ENOMEM; + return NULL; } skb = nskb; } @@ -892,7 +893,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) ip_proto = ipv6_hdr(skb)->nexthdr; break; default: - return 0; + return skb; } /* Get the checksum offset and the L4 (transport) offset */ @@ -911,7 +912,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) tsb->l4_ptr_dest_map = csum_info; } - return 0; + return skb; } static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, @@ -945,8 +946,8 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, /* Insert TSB and checksum infos */ if (priv->tsb_en) { - ret = bcm_sysport_insert_tsb(skb, dev); - if (ret) { + skb = bcm_sysport_insert_tsb(skb, dev); + if (!skb) { ret = NETDEV_TX_OK; goto out; } diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 8ee3fdcc17cd..5fac411c52f4 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -3410,7 +3410,7 @@ bna_bfi_tx_enet_start(struct bna_tx *tx) cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); - cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED; + 
cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ffc92a41d75b..153cafac323c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2864,7 +2864,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); txqent->hdr.wi.lso_mss = 0; - if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) { + if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); return -EINVAL; } diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 3e38f67c6011..8e9371a3388a 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -267,34 +267,6 @@ spider_net_set_promisc(struct spider_net_card *card) } /** - * spider_net_get_mac_address - read mac address from spider card - * @card: device structure - * - * reads MAC address from GMACUNIMACU and GMACUNIMACL registers - */ -static int -spider_net_get_mac_address(struct net_device *netdev) -{ - struct spider_net_card *card = netdev_priv(netdev); - u32 macl, macu; - - macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL); - macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU); - - netdev->dev_addr[0] = (macu >> 24) & 0xff; - netdev->dev_addr[1] = (macu >> 16) & 0xff; - netdev->dev_addr[2] = (macu >> 8) & 0xff; - netdev->dev_addr[3] = macu & 0xff; - netdev->dev_addr[4] = (macl >> 8) & 0xff; - netdev->dev_addr[5] = macl & 0xff; - - if (!is_valid_ether_addr(&netdev->dev_addr[0])) - return -EINVAL; - - return 0; -} - -/** * spider_net_get_descr_status -- returns the status of a descriptor * @descr: descriptor to look at * @@ -1345,15 +1317,17 @@ spider_net_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); + /* switch off GMACTPE and GMACRPE */ regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); regvalue &= ~((1 << 5) | (1 << 6)); spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); /* write mac */ - macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) + - (addr->sa_data[2]<<8) + (addr->sa_data[3]); - macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]); + macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) + + (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]); + macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]); spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu); spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl); @@ -1364,12 +1338,6 @@ spider_net_set_mac(struct net_device *netdev, void *p) spider_net_set_promisc(card); - /* look up, whether we have been successful */ - if (spider_net_get_mac_address(netdev)) - return -EADDRNOTAVAIL; - if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len)) - return -EADDRNOTAVAIL; - return 0; } diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 977984bc238a..7d76c9523395 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -717,6 +717,7 @@ int netvsc_send(struct hv_device *device, unsigned int section_index = NETVSC_INVALID_INDEX; u32 msg_size = 0; struct sk_buff *skb; + u16 q_idx = packet->q_idx; net_device = get_outbound_net_device(device); @@ -781,24 +782,24 @@ int 
netvsc_send(struct hv_device *device, if (ret == 0) { atomic_inc(&net_device->num_outstanding_sends); - atomic_inc(&net_device->queue_sends[packet->q_idx]); + atomic_inc(&net_device->queue_sends[q_idx]); if (hv_ringbuf_avail_percent(&out_channel->outbound) < RING_AVAIL_PERCENT_LOWATER) { netif_tx_stop_queue(netdev_get_tx_queue( - ndev, packet->q_idx)); + ndev, q_idx)); if (atomic_read(&net_device-> - queue_sends[packet->q_idx]) < 1) + queue_sends[q_idx]) < 1) netif_tx_wake_queue(netdev_get_tx_queue( - ndev, packet->q_idx)); + ndev, q_idx)); } } else if (ret == -EAGAIN) { netif_tx_stop_queue(netdev_get_tx_queue( - ndev, packet->q_idx)); - if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) { + ndev, q_idx)); + if (atomic_read(&net_device->queue_sends[q_idx]) < 1) { netif_tx_wake_queue(netdev_get_tx_queue( - ndev, packet->q_idx)); + ndev, q_idx)); ret = -ENOSPC; } } else { diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a94a9df3e6bd..2368395d8ae5 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team) { if (!team->notify_peers.count || !netif_running(team->dev)) return; - atomic_set(&team->notify_peers.count_pending, team->notify_peers.count); + atomic_add(team->notify_peers.count, &team->notify_peers.count_pending); schedule_delayed_work(&team->notify_peers.dw, 0); } @@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team) { if (!team->mcast_rejoin.count || !netif_running(team->dev)) return; - atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count); + atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending); schedule_delayed_work(&team->mcast_rejoin.dw, 0); } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 5d194093f3e1..2c05f6cdb12f 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -890,7 +890,7 @@ static const struct driver_info ax88772_info = { .unbind = ax88772_unbind, .status = asix_status, .link_reset = ax88772_link_reset, - .reset = ax88772_reset, + .reset = ax88772_link_reset, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 65326204baa0..5cfd414b9a3e 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -980,9 +980,14 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) { struct r8152 *tp = netdev_priv(netdev); struct sockaddr *addr = p; + int ret = -EADDRNOTAVAIL; if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; + goto out1; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out1; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); @@ -990,7 +995,9 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); - return 0; + usb_autopm_put_interface(tp->intf); +out1: + return ret; } static int set_ethernet_addr(struct r8152 *tp) diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a042d065a0c7..8be2096c8423 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -395,7 +395,8 @@ static void __init superio_serial_init(void) serial_port.iotype = UPIO_PORT; serial_port.type = PORT_16550A; serial_port.uartclk = 115200*16; - serial_port.fifosize = 16; + serial_port.flags 
= UPF_FIXED_PORT | UPF_FIXED_TYPE | + UPF_BOOT_AUTOCONF; /* serial port #1 */ serial_port.iobase = sio_dev.sp1_base; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index e3cfa0227026..12ba682fc53c 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2039,6 +2039,10 @@ kill: "and killing the other node now! This node is OK and can continue.\n"); __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); + spin_lock(&dlm->master_lock); + if (mle) + __dlm_put_mle(mle); + spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); *ret_data = (void *)res; dlm_put(dlm); diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index f22538e68245..d4a20d00461c 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -115,7 +115,7 @@ typedef enum { * analysis of the state functions, but in reality just taken from * thin air in the hopes othat we don't trigger a kernel panic. */ -#define SCTP_MAX_NUM_COMMANDS 14 +#define SCTP_MAX_NUM_COMMANDS 20 typedef union { void *zero_all; /* Set to NULL to clear the entire union */ diff --git a/kernel/events/core.c b/kernel/events/core.c index d640a8b4dcbc..963bf139e2b2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7948,8 +7948,10 @@ int perf_event_init_task(struct task_struct *child) for_each_task_context_nr(ctxn) { ret = perf_event_init_context(child, ctxn); - if (ret) + if (ret) { + perf_event_free_task(child); return ret; + } } return 0; diff --git a/kernel/fork.c b/kernel/fork.c index 0cf9cdb6e491..a91e47d86de2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1360,7 +1360,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto bad_fork_cleanup_policy; retval = audit_alloc(p); if (retval) - goto bad_fork_cleanup_policy; + goto bad_fork_cleanup_perf; /* copy all the process information */ shm_init_task(p); retval = copy_semundo(clone_flags, p); @@ -1566,8 +1566,9 @@ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_audit: audit_free(p); -bad_fork_cleanup_policy: +bad_fork_cleanup_perf: perf_event_free_task(p); +bad_fork_cleanup_policy: #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_threadgroup_lock: diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d9a21d06b862..f8ffd9412ec5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1795,14 +1795,17 @@ static int __split_huge_page_map(struct page *page, for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { pte_t *pte, entry; BUG_ON(PageCompound(page+i)); + /* + * Note that pmd_numa is not transferred deliberately + * to avoid any possibility that pte_numa leaks to + * a PROT_NONE VMA by accident. + */ entry = mk_pte(page + i, vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (!pmd_write(*pmd)) entry = pte_wrprotect(entry); if (!pmd_young(*pmd)) entry = pte_mkold(entry); - if (pmd_numa(*pmd)) - entry = pte_mknuma(entry); pte = pte_offset_map(&_pmd, haddr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, haddr, pte, entry); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 085dc6d2f876..28928ce9b07f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -292,6 +292,9 @@ struct mem_cgroup { /* vmpressure notifications */ struct vmpressure vmpressure; + /* css_online() has been completed */ + int initialized; + /* * the counter to account for mem+swap usage. */ @@ -1099,10 +1102,21 @@ skip_node: * skipping css reference should be safe. 
*/ if (next_css) { - if ((next_css == &root->css) || - ((next_css->flags & CSS_ONLINE) && - css_tryget_online(next_css))) - return mem_cgroup_from_css(next_css); + struct mem_cgroup *memcg = mem_cgroup_from_css(next_css); + + if (next_css == &root->css) + return memcg; + + if (css_tryget_online(next_css)) { + /* + * Make sure the memcg is initialized: + * mem_cgroup_css_online() orders the the + * initialization against setting the flag. + */ + if (smp_load_acquire(&memcg->initialized)) + return memcg; + css_put(next_css); + } prev_css = next_css; goto skip_node; @@ -5549,6 +5563,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); + int ret; if (css->id > MEM_CGROUP_ID_MAX) return -ENOSPC; @@ -5585,7 +5600,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) } mutex_unlock(&memcg_create_mutex); - return memcg_init_kmem(memcg, &memory_cgrp_subsys); + ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); + if (ret) + return ret; + + /* + * Make sure the memcg is initialized: mem_cgroup_iter() + * orders reading memcg->initialized against its callers + * reading the memcg members. + */ + smp_store_release(&memcg->initialized, 1); + + return 0; } /* diff --git a/mm/migrate.c b/mm/migrate.c index f78ec9bd454d..2740360cd216 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -146,8 +146,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); if (pte_swp_soft_dirty(*ptep)) pte = pte_mksoft_dirty(pte); + + /* Recheck VMA as permissions can change since migration started */ if (is_write_migration_entry(entry)) - pte = pte_mkwrite(pte); + pte = maybe_mkwrite(pte, vma); + #ifdef CONFIG_HUGETLB_PAGE if (PageHuge(new)) { pte = pte_mkhuge(pte); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 18cee0d4c8a2..eee961958021 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1612,7 +1612,7 @@ again: } __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); - if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 && + if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && !zone_is_fair_depleted(zone)) zone_set_flag(zone, ZONE_FAIR_DEPLETED); @@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void) zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); __mod_zone_page_state(zone, NR_ALLOC_BATCH, - high_wmark_pages(zone) - - low_wmark_pages(zone) - - zone_page_state(zone, NR_ALLOC_BATCH)); + high_wmark_pages(zone) - low_wmark_pages(zone) - + atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); setup_zone_migrate_reserve(zone); spin_unlock_irqrestore(&zone->lock, flags); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index fa1270cc5086..1bada53bb195 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -316,6 +316,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) ETH_HLEN-ETH_ALEN); /* tell br_dev_xmit to continue with forwarding */ nf_bridge->mask |= BRNF_BRIDGED_DNAT; + /* FIXME Need to refragment */ ret = neigh->output(neigh, skb); } neigh_release(neigh); @@ -371,6 +372,10 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb) struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; int err; + int frag_max_size; + + frag_max_size = IPCB(skb)->frag_max_size; + BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; @@ -775,13 +780,19 @@ 
static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; + int frag_max_size; + /* This is wrong! We should preserve the original fragment + * boundaries by preserving frag_list rather than refragmenting. + */ if (skb->protocol == htons(ETH_P_IP) && skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && !skb_is_gso(skb)) { + frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; if (br_parse_ip_options(skb)) /* Drop invalid packet */ return NF_DROP; + IPCB(skb)->frag_max_size = frag_max_size; ret = ip_fragment(skb, br_dev_queue_push_xmit); } else ret = br_dev_queue_push_xmit(skb); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d8cbaa694227..4d783d071305 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -306,10 +306,14 @@ struct net_bridge struct br_input_skb_cb { struct net_device *brdev; + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING int igmp; int mrouters_only; #endif + + u16 frag_max_size; + #ifdef CONFIG_BRIDGE_VLAN_FILTERING bool vlan_filtered; #endif diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index de3b1c86b8d3..12c3c8ef3849 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -786,7 +786,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_IPIP; + fl6.flowi6_proto = IPPROTO_GRE; dsfield = ipv4_get_dsfield(iph); @@ -836,7 +836,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_IPV6; + fl6.flowi6_proto = IPPROTO_GRE; dsfield = ipv6_get_dsfield(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) diff --git a/net/rds/send.c b/net/rds/send.c index 23718160d71e..0a64541020b0 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status) sock_put(rds_rs_to_sk(rs)); } rs = rm->m_rs; - sock_hold(rds_rs_to_sk(rs)); + if (rs) + sock_hold(rds_rs_to_sk(rs)); } + if (!rs) + goto unlock_and_drop; spin_lock(&rs->rs_lock); if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { @@ -638,9 +641,6 @@ unlock_and_drop: * queue. This means that in the TCP case, the message may not have been * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked * checks the RDS_MSG_HAS_ACK_SEQ bit. - * - * XXX It's not clear to me how this is safely serialized with socket - * destruction. Maybe it should bail if it sees SOCK_DEAD. 
*/ void rds_send_drop_acked(struct rds_connection *conn, u64 ack, is_acked_func is_acked) @@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) */ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { spin_unlock_irqrestore(&conn->c_lock, flags); + spin_lock_irqsave(&rm->m_rs_lock, flags); + rm->m_rs = NULL; + spin_unlock_irqrestore(&rm->m_rs_lock, flags); continue; } list_del_init(&rm->m_conn_item); diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index a65ee78db0c5..f9f564a6c960 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -106,11 +106,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn) rds_tcp_set_callbacks(sock, conn); ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), O_NONBLOCK); - sock = NULL; rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); if (ret == -EINPROGRESS) ret = 0; + if (ret == 0) + sock = NULL; + else + rds_tcp_restore_callbacks(sock, conn->c_transport_data); out: if (sock) diff --git a/net/rds/threads.c b/net/rds/threads.c index 65eaefcab241..dc2402e871fd 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -78,8 +78,7 @@ void rds_connect_complete(struct rds_connection *conn) "current state is %d\n", __func__, atomic_read(&conn->c_state)); - atomic_set(&conn->c_state, RDS_CONN_ERROR); - queue_work(rds_wq, &conn->c_down_w); + rds_conn_drop(conn); return; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 77147c8c4acc..aad6a679fb13 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -549,6 +549,7 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, tcf_tree_lock(tp); list_splice_init(&dst->actions, &tmp); list_splice(&src->actions, &dst->actions); + dst->type = src->type; tcf_tree_unlock(tp); tcf_action_destroy(&tmp, TCA_ACT_UNBIND); #endif diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 8250c36543d8..6742200b1307 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -528,9 +528,10 @@ pop_stack: match_idx = stack[--stackp]; cur_match = tcf_em_get_match(tree, match_idx); + if (tcf_em_is_inverted(cur_match)) + res = !res; + if (tcf_em_early_end(cur_match, res)) { - if (tcf_em_is_inverted(cur_match)) - res = !res; goto pop_stack; } else { match_idx++; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d3f1ea460c50..c8f606324134 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, /* Update the content of current association. */ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, - SCTP_STATE(SCTP_STATE_ESTABLISHED)); - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + if (sctp_state(asoc, SHUTDOWN_PENDING) && + (sctp_sstate(asoc->base.sk, CLOSING) || + sock_flag(asoc->base.sk, SOCK_DEAD))) { + /* if were currently in SHUTDOWN_PENDING, but the socket + * has been closed by user, don't transition to ESTABLISHED. + * Instead trigger SHUTDOWN bundled with COOKIE_ACK. 
+ */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, + SCTP_ST_CHUNK(0), NULL, + commands); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + } return SCTP_DISPOSITION_CONSUME; nomem_ev: diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index e4f6102efc1a..b86b426f159d 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -51,7 +51,7 @@ static struct reg_default rt286_index_def[] = { { 0x04, 0xaf01 }, { 0x08, 0x000d }, { 0x09, 0xd810 }, - { 0x0a, 0x0060 }, + { 0x0a, 0x0120 }, { 0x0b, 0x0000 }, { 0x0d, 0x2800 }, { 0x0f, 0x0000 }, @@ -60,7 +60,7 @@ static struct reg_default rt286_index_def[] = { { 0x33, 0x0208 }, { 0x49, 0x0004 }, { 0x4f, 0x50e9 }, - { 0x50, 0x2c00 }, + { 0x50, 0x2000 }, { 0x63, 0x2902 }, { 0x67, 0x1111 }, { 0x68, 0x1016 }, @@ -104,7 +104,6 @@ static const struct reg_default rt286_reg[] = { { 0x02170700, 0x00000000 }, { 0x02270100, 0x00000000 }, { 0x02370100, 0x00000000 }, - { 0x02040000, 0x00004002 }, { 0x01870700, 0x00000020 }, { 0x00830000, 0x000000c3 }, { 0x00930000, 0x000000c3 }, @@ -192,7 +191,6 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value) /*handle index registers*/ if (reg <= 0xff) { rt286_hw_write(client, RT286_COEF_INDEX, reg); - reg = RT286_PROC_COEF; for (i = 0; i < INDEX_CACHE_SIZE; i++) { if (reg == rt286->index_cache[i].reg) { rt286->index_cache[i].def = value; @@ -200,6 +198,7 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value) } } + reg = RT286_PROC_COEF; } data[0] = (reg >> 24) & 0xff; diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 484b3bbe8624..4021cd435740 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c @@ -647,7 +647,7 @@ int ssm2602_probe(struct device *dev, enum ssm2602_type type, return -ENOMEM; dev_set_drvdata(dev, ssm2602); - ssm2602->type = SSM2602; + ssm2602->type = type; ssm2602->regmap = regmap; return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602, diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 87eb5776a39b..de6ab06f58a5 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -748,8 +748,9 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream, return 0; } -static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private, - unsigned int fmt) +static int _fsl_ssi_set_dai_fmt(struct device *dev, + struct fsl_ssi_private *ssi_private, + unsigned int fmt) { struct regmap *regs = ssi_private->regs; u32 strcr = 0, stcr, srcr, scr, mask; @@ -758,7 +759,7 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private, ssi_private->dai_fmt = fmt; if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) { - dev_err(&ssi_private->pdev->dev, "baudclk is missing which is necessary for master mode\n"); + dev_err(dev, "baudclk is missing which is necessary for master mode\n"); return -EINVAL; } @@ -913,7 +914,7 @@ static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); - return _fsl_ssi_set_dai_fmt(ssi_private, fmt); + return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt); } /** @@ -1387,7 +1388,8 @@ static int fsl_ssi_probe(struct platform_device *pdev) done: if (ssi_private->dai_fmt) - _fsl_ssi_set_dai_fmt(ssi_private, ssi_private->dai_fmt); + 
_fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private, + ssi_private->dai_fmt); return 0; diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 3092b58fede6..cecfab3cc948 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -102,13 +102,11 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream) fe->dpcm[stream].runtime = fe_substream->runtime; ret = dpcm_path_get(fe, stream, &list); - if (ret < 0) { - mutex_unlock(&fe->card->mutex); + if (ret < 0) goto fe_err; - } else if (ret == 0) { + else if (ret == 0) dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", fe->dai_link->name, stream ? "capture" : "playback"); - } /* calculate valid and active FE <-> BE dpcms */ dpcm_process_paths(fe, stream, &list, 1); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 889f4e3d35dc..d074aa91b023 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3203,7 +3203,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, unsigned int val, mask; void *data; - if (!component->regmap) + if (!component->regmap || !params->num_regs) return -EINVAL; len = params->num_regs * component->val_bytes;