author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-26 14:51:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-26 14:51:15 -0700
commit     d87823813fe498fdd47894bd28e460a9dee8d771 (patch)
tree       214eaf3babd0d61f08022fc1edd99a5128616548 /drivers/misc
parent     e382608254e06c8109f40044f5e693f2e04f3899 (diff)
parent     3dc196eae1db548f05e53e5875ff87b8ff79f249 (diff)
Merge tag 'char-misc-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here's the big char/misc driver pull request for 4.2-rc1.

  Lots of mei, extcon, coresight, uio, mic, and other driver updates in
  here.  Full details in the shortlog.

  All of these have been in linux-next for some time with no reported
  problems"

* tag 'char-misc-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (176 commits)
  mei: me: wait for power gating exit confirmation
  mei: reset flow control on the last client disconnection
  MAINTAINERS: mei: add mei_cl_bus.h to maintained file list
  misc: sram: sort and clean up included headers
  misc: sram: move reserved block logic out of probe function
  misc: sram: add private struct device and virt_base members
  misc: sram: report correct SRAM pool size
  misc: sram: bump error message level on unclean driver unbinding
  misc: sram: fix device node reference leak on error
  misc: sram: fix enabled clock leak on error path
  misc: mic: Fix reported static checker warning
  misc: mic: Fix randconfig build error by including errno.h
  uio: pruss: Drop depends on ARCH_DAVINCI_DA850 from config
  uio: pruss: Add CONFIG_HAS_IOMEM dependence
  uio: pruss: Include <linux/sizes.h>
  extcon: Redefine the unique id of supported external connectors without 'enum extcon' type
  char:xilinx_hwicap:buffer_icap - change 1/0 to true/false for bool type variable in function buffer_icap_set_configuration().
  Drivers: hv: vmbus: Allocate ring buffer memory in NUMA aware fashion
  parport: check exclusive access before register
  w1: use correct lock on error in w1_seq_show()
  ...
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig                      |    1
-rw-r--r--  drivers/misc/Makefile                     |    1
-rw-r--r--  drivers/misc/carma/Kconfig                |   15
-rw-r--r--  drivers/misc/carma/Makefile               |    2
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c   | 1182
-rw-r--r--  drivers/misc/carma/carma-fpga.c           | 1507
-rw-r--r--  drivers/misc/mei/amthif.c                 |   28
-rw-r--r--  drivers/misc/mei/bus.c                    |  150
-rw-r--r--  drivers/misc/mei/client.c                 |  473
-rw-r--r--  drivers/misc/mei/client.h                 |  114
-rw-r--r--  drivers/misc/mei/debugfs.c                |   15
-rw-r--r--  drivers/misc/mei/hbm.c                    |   16
-rw-r--r--  drivers/misc/mei/hw-me.c                  |   59
-rw-r--r--  drivers/misc/mei/hw-txe.c                 |   33
-rw-r--r--  drivers/misc/mei/init.c                   |    8
-rw-r--r--  drivers/misc/mei/interrupt.c              |   95
-rw-r--r--  drivers/misc/mei/main.c                   |   57
-rw-r--r--  drivers/misc/mei/mei_dev.h                |  102
-rw-r--r--  drivers/misc/mei/nfc.c                    |  223
-rw-r--r--  drivers/misc/mei/pci-txe.c                |    2
-rw-r--r--  drivers/misc/mei/wd.c                     |   22
-rw-r--r--  drivers/misc/mic/Kconfig                  |   40
-rw-r--r--  drivers/misc/mic/Makefile                 |    3
-rw-r--r--  drivers/misc/mic/bus/Makefile             |    1
-rw-r--r--  drivers/misc/mic/bus/scif_bus.c           |  210
-rw-r--r--  drivers/misc/mic/bus/scif_bus.h           |  129
-rw-r--r--  drivers/misc/mic/card/mic_device.c        |  132
-rw-r--r--  drivers/misc/mic/card/mic_device.h        |   11
-rw-r--r--  drivers/misc/mic/card/mic_x100.c          |   61
-rw-r--r--  drivers/misc/mic/card/mic_x100.h          |    1
-rw-r--r--  drivers/misc/mic/common/mic_dev.h         |    3
-rw-r--r--  drivers/misc/mic/host/mic_boot.c          |  264
-rw-r--r--  drivers/misc/mic/host/mic_debugfs.c       |   13
-rw-r--r--  drivers/misc/mic/host/mic_device.h        |   11
-rw-r--r--  drivers/misc/mic/host/mic_intr.h          |    3
-rw-r--r--  drivers/misc/mic/host/mic_main.c          |    6
-rw-r--r--  drivers/misc/mic/host/mic_smpt.c          |    7
-rw-r--r--  drivers/misc/mic/host/mic_smpt.h          |    1
-rw-r--r--  drivers/misc/mic/host/mic_virtio.c        |    6
-rw-r--r--  drivers/misc/mic/host/mic_x100.c          |    3
-rw-r--r--  drivers/misc/mic/scif/Makefile            |   15
-rw-r--r--  drivers/misc/mic/scif/scif_api.c          | 1276
-rw-r--r--  drivers/misc/mic/scif/scif_debugfs.c      |   85
-rw-r--r--  drivers/misc/mic/scif/scif_epd.c          |  353
-rw-r--r--  drivers/misc/mic/scif/scif_epd.h          |  160
-rw-r--r--  drivers/misc/mic/scif/scif_fd.c           |  303
-rw-r--r--  drivers/misc/mic/scif/scif_main.c         |  388
-rw-r--r--  drivers/misc/mic/scif/scif_main.h         |  254
-rw-r--r--  drivers/misc/mic/scif/scif_map.h          |  113
-rw-r--r--  drivers/misc/mic/scif/scif_nm.c           |  237
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.c       | 1312
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.h       |  183
-rw-r--r--  drivers/misc/mic/scif/scif_peer_bus.c     |  124
-rw-r--r--  drivers/misc/mic/scif/scif_peer_bus.h     |   65
-rw-r--r--  drivers/misc/mic/scif/scif_ports.c        |  124
-rw-r--r--  drivers/misc/mic/scif/scif_rb.c           |  249
-rw-r--r--  drivers/misc/mic/scif/scif_rb.h           |  100
-rw-r--r--  drivers/misc/sram.c                       |  137
-rw-r--r--  drivers/misc/ti-st/st_kim.c               |    3
59 files changed, 7022 insertions, 3469 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 006242c8bca0..42c38525904b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -520,7 +520,6 @@ source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
-source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7d5c4cd118c4..d056fb7186fe 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-y += lis3lv02d/
-obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
deleted file mode 100644
index 295882bfb14e..000000000000
--- a/drivers/misc/carma/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-config CARMA_FPGA
- tristate "CARMA DATA-FPGA Access Driver"
- depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
- default n
- help
- Say Y here to include support for communicating with the data
- processing FPGAs on the OVRO CARMA board.
-
-config CARMA_FPGA_PROGRAM
- tristate "CARMA DATA-FPGA Programmer"
- depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
- default n
- help
- Say Y here to include support for programming the data processing
- FPGAs on the OVRO CARMA board.
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
deleted file mode 100644
index ff36ac2ce534..000000000000
--- a/drivers/misc/carma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o
-obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
deleted file mode 100644
index 0b1bd85e4ae6..000000000000
--- a/drivers/misc/carma/carma-fpga-program.c
+++ /dev/null
@@ -1,1182 +0,0 @@
-/*
- * CARMA Board DATA-FPGA Programmer
- *
- * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/completion.h>
-#include <linux/miscdevice.h>
-#include <linux/dmaengine.h>
-#include <linux/fsldma.h>
-#include <linux/interrupt.h>
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/leds.h>
-#include <linux/slab.h>
-#include <linux/kref.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-
-/* MPC8349EMDS specific get_immrbase() */
-#include <sysdev/fsl_soc.h>
-
-static const char drv_name[] = "carma-fpga-program";
-
-/*
- * Firmware images are always this exact size
- *
- * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
- * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
- */
-#define FW_SIZE_EP2S90 12849552
-#define FW_SIZE_EP2S130 18662880
-
-struct fpga_dev {
- struct miscdevice miscdev;
-
- /* Reference count */
- struct kref ref;
-
- /* Device Registers */
- struct device *dev;
- void __iomem *regs;
- void __iomem *immr;
-
- /* Freescale DMA Device */
- struct dma_chan *chan;
-
- /* Interrupts */
- int irq, status;
- struct completion completion;
-
- /* FPGA Bitfile */
- struct mutex lock;
-
- void *vaddr;
- struct scatterlist *sglist;
- int sglen;
- int nr_pages;
- bool buf_allocated;
-
- /* max size and written bytes */
- size_t fw_size;
- size_t bytes;
-};
-
-static int fpga_dma_init(struct fpga_dev *priv, int nr_pages)
-{
- struct page *pg;
- int i;
-
- priv->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
- if (NULL == priv->vaddr) {
- pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
- return -ENOMEM;
- }
-
- pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
- (unsigned long)priv->vaddr,
- nr_pages << PAGE_SHIFT);
-
- memset(priv->vaddr, 0, nr_pages << PAGE_SHIFT);
- priv->nr_pages = nr_pages;
-
- priv->sglist = vzalloc(priv->nr_pages * sizeof(*priv->sglist));
- if (NULL == priv->sglist)
- goto vzalloc_err;
-
- sg_init_table(priv->sglist, priv->nr_pages);
- for (i = 0; i < priv->nr_pages; i++) {
- pg = vmalloc_to_page(priv->vaddr + i * PAGE_SIZE);
- if (NULL == pg)
- goto vmalloc_to_page_err;
- sg_set_page(&priv->sglist[i], pg, PAGE_SIZE, 0);
- }
- return 0;
-
-vmalloc_to_page_err:
- vfree(priv->sglist);
- priv->sglist = NULL;
-vzalloc_err:
- vfree(priv->vaddr);
- priv->vaddr = NULL;
- return -ENOMEM;
-}
-
-static int fpga_dma_map(struct fpga_dev *priv)
-{
- priv->sglen = dma_map_sg(priv->dev, priv->sglist,
- priv->nr_pages, DMA_TO_DEVICE);
-
- if (0 == priv->sglen) {
- pr_warn("%s: dma_map_sg failed\n", __func__);
- return -ENOMEM;
- }
- return 0;
-}
-
-static int fpga_dma_unmap(struct fpga_dev *priv)
-{
- if (!priv->sglen)
- return 0;
-
- dma_unmap_sg(priv->dev, priv->sglist, priv->sglen, DMA_TO_DEVICE);
- priv->sglen = 0;
- return 0;
-}
-
-/*
- * FPGA Bitfile Helpers
- */
-
-/**
- * fpga_drop_firmware_data() - drop the bitfile image from memory
- * @priv: the driver's private data structure
- *
- * LOCKING: must hold priv->lock
- */
-static void fpga_drop_firmware_data(struct fpga_dev *priv)
-{
- vfree(priv->sglist);
- vfree(priv->vaddr);
- priv->buf_allocated = false;
- priv->bytes = 0;
-}
-
-/*
- * Private Data Reference Count
- */
-
-static void fpga_dev_remove(struct kref *ref)
-{
- struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
-
- /* free any firmware image that was not programmed */
- fpga_drop_firmware_data(priv);
-
- mutex_destroy(&priv->lock);
- kfree(priv);
-}
-
-/*
- * LED Trigger (could be a separate module)
- */
-
-/*
- * NOTE: this whole thing does have the problem that whenever the led's are
- * NOTE: first set to use the fpga trigger, they could be in the wrong state
- */
-
-DEFINE_LED_TRIGGER(ledtrig_fpga);
-
-static void ledtrig_fpga_programmed(bool enabled)
-{
- if (enabled)
- led_trigger_event(ledtrig_fpga, LED_FULL);
- else
- led_trigger_event(ledtrig_fpga, LED_OFF);
-}
-
-/*
- * FPGA Register Helpers
- */
-
-/* Register Definitions */
-#define FPGA_CONFIG_CONTROL 0x40
-#define FPGA_CONFIG_STATUS 0x44
-#define FPGA_CONFIG_FIFO_SIZE 0x48
-#define FPGA_CONFIG_FIFO_USED 0x4C
-#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50
-#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54
-
-#define FPGA_FIFO_ADDRESS 0x3000
-
-static int fpga_fifo_size(void __iomem *regs)
-{
- return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE);
-}
-
-#define CFG_STATUS_ERR_MASK 0xfffe
-
-static int fpga_config_error(void __iomem *regs)
-{
- return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK;
-}
-
-static int fpga_fifo_empty(void __iomem *regs)
-{
- return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0;
-}
-
-static void fpga_fifo_write(void __iomem *regs, u32 val)
-{
- iowrite32be(val, regs + FPGA_FIFO_ADDRESS);
-}
-
-static void fpga_set_byte_count(void __iomem *regs, u32 count)
-{
- iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
-}
-
-#define CFG_CTL_ENABLE (1 << 0)
-#define CFG_CTL_RESET (1 << 1)
-#define CFG_CTL_DMA (1 << 2)
-
-static void fpga_programmer_enable(struct fpga_dev *priv, bool dma)
-{
- u32 val;
-
- val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE;
- iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
-}
-
-static void fpga_programmer_disable(struct fpga_dev *priv)
-{
- iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
-}
-
-static void fpga_dump_registers(struct fpga_dev *priv)
-{
- u32 control, status, size, used, total, curr;
-
- /* good status: do nothing */
- if (priv->status == 0)
- return;
-
- /* Dump all status registers */
- control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL);
- status = ioread32be(priv->regs + FPGA_CONFIG_STATUS);
- size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE);
- used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED);
- total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
- curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT);
-
- dev_err(priv->dev, "Configuration failed, dumping status registers\n");
- dev_err(priv->dev, "Control: 0x%.8x\n", control);
- dev_err(priv->dev, "Status: 0x%.8x\n", status);
- dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size);
- dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used);
- dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total);
- dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr);
-}
-
-/*
- * FPGA Power Supply Code
- */
-
-#define CTL_PWR_CONTROL 0x2006
-#define CTL_PWR_STATUS 0x200A
-#define CTL_PWR_FAIL 0x200B
-
-#define PWR_CONTROL_ENABLE 0x01
-
-#define PWR_STATUS_ERROR_MASK 0x10
-#define PWR_STATUS_GOOD 0x0f
-
-/*
- * Determine if the FPGA power is good for all supplies
- */
-static bool fpga_power_good(struct fpga_dev *priv)
-{
- u8 val;
-
- val = ioread8(priv->regs + CTL_PWR_STATUS);
- if (val & PWR_STATUS_ERROR_MASK)
- return false;
-
- return val == PWR_STATUS_GOOD;
-}
-
-/*
- * Disable the FPGA power supplies
- */
-static void fpga_disable_power_supplies(struct fpga_dev *priv)
-{
- unsigned long start;
- u8 val;
-
- iowrite8(0x0, priv->regs + CTL_PWR_CONTROL);
-
- /*
- * Wait 500ms for the power rails to discharge
- *
- * Without this delay, the CTL-CPLD state machine can get into a
- * state where it is waiting for the power-goods to assert, but they
- * never do. This only happens when enabling and disabling the
- * power sequencer very rapidly.
- *
- * The loop below will also wait for the power goods to de-assert,
- * but testing has shown that they are always disabled by the time
- * the sleep completes. However, omitting the sleep and only waiting
- * for the power-goods to de-assert was not sufficient to ensure
- * that the power sequencer would not wedge itself.
- */
- msleep(500);
-
- start = jiffies;
- while (time_before(jiffies, start + HZ)) {
- val = ioread8(priv->regs + CTL_PWR_STATUS);
- if (!(val & PWR_STATUS_GOOD))
- break;
-
- usleep_range(5000, 10000);
- }
-
- val = ioread8(priv->regs + CTL_PWR_STATUS);
- if (val & PWR_STATUS_GOOD) {
- dev_err(priv->dev, "power disable failed: "
- "power goods: status 0x%.2x\n", val);
- }
-
- if (val & PWR_STATUS_ERROR_MASK) {
- dev_err(priv->dev, "power disable failed: "
- "alarm bit set: status 0x%.2x\n", val);
- }
-}
-
-/**
- * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies
- * @priv: the driver's private data structure
- *
- * Enable the DATA-FPGA power supplies, waiting up to 1 second for
- * them to enable successfully.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int fpga_enable_power_supplies(struct fpga_dev *priv)
-{
- unsigned long start = jiffies;
-
- if (fpga_power_good(priv)) {
- dev_dbg(priv->dev, "power was already good\n");
- return 0;
- }
-
- iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL);
- while (time_before(jiffies, start + HZ)) {
- if (fpga_power_good(priv))
- return 0;
-
- usleep_range(5000, 10000);
- }
-
- return fpga_power_good(priv) ? 0 : -ETIMEDOUT;
-}
-
-/*
- * Determine if the FPGA power supplies are all enabled
- */
-static bool fpga_power_enabled(struct fpga_dev *priv)
-{
- u8 val;
-
- val = ioread8(priv->regs + CTL_PWR_CONTROL);
- if (val & PWR_CONTROL_ENABLE)
- return true;
-
- return false;
-}
-
-/*
- * Determine if the FPGA's are programmed and running correctly
- */
-static bool fpga_running(struct fpga_dev *priv)
-{
- if (!fpga_power_good(priv))
- return false;
-
- /* Check the config done bit */
- return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18);
-}
-
-/*
- * FPGA Programming Code
- */
-
-/**
- * fpga_program_block() - put a block of data into the programmer's FIFO
- * @priv: the driver's private data structure
- * @buf: the data to program
- * @count: the length of data to program (must be a multiple of 4 bytes)
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
-{
- u32 *data = buf;
- int size = fpga_fifo_size(priv->regs);
- int i, len;
- unsigned long timeout;
-
- /* enforce correct data length for the FIFO */
- BUG_ON(count % 4 != 0);
-
- while (count > 0) {
-
- /* Get the size of the block to write (maximum is FIFO_SIZE) */
- len = min_t(size_t, count, size);
- timeout = jiffies + HZ / 4;
-
- /* Write the block */
- for (i = 0; i < len / 4; i++)
- fpga_fifo_write(priv->regs, data[i]);
-
- /* Update the amounts left */
- count -= len;
- data += len / 4;
-
- /* Wait for the fifo to empty */
- while (true) {
-
- if (fpga_fifo_empty(priv->regs)) {
- break;
- } else {
- dev_dbg(priv->dev, "Fifo not empty\n");
- cpu_relax();
- }
-
- if (fpga_config_error(priv->regs)) {
- dev_err(priv->dev, "Error detected\n");
- return -EIO;
- }
-
- if (time_after(jiffies, timeout)) {
- dev_err(priv->dev, "Fifo drain timeout\n");
- return -ETIMEDOUT;
- }
-
- usleep_range(5000, 10000);
- }
- }
-
- return 0;
-}
-
-/**
- * fpga_program_cpu() - program the DATA-FPGA's using the CPU
- * @priv: the driver's private data structure
- *
- * This is useful when the DMA programming method fails. It is possible to
- * wedge the Freescale DMA controller such that the DMA programming method
- * always fails. This method has always succeeded.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static noinline int fpga_program_cpu(struct fpga_dev *priv)
-{
- int ret;
- unsigned long timeout;
-
- /* Disable the programmer */
- fpga_programmer_disable(priv);
-
- /* Set the total byte count */
- fpga_set_byte_count(priv->regs, priv->bytes);
- dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
-
- /* Enable the controller for programming */
- fpga_programmer_enable(priv, false);
- dev_dbg(priv->dev, "enabled the controller\n");
-
- /* Write each chunk of the FPGA bitfile to FPGA programmer */
- ret = fpga_program_block(priv, priv->vaddr, priv->bytes);
- if (ret)
- goto out_disable_controller;
-
- /* Wait for the interrupt handler to signal that programming finished */
- timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
- if (!timeout) {
- dev_err(priv->dev, "Timed out waiting for completion\n");
- ret = -ETIMEDOUT;
- goto out_disable_controller;
- }
-
- /* Retrieve the status from the interrupt handler */
- ret = priv->status;
-
-out_disable_controller:
- fpga_programmer_disable(priv);
- return ret;
-}
-
-#define FIFO_DMA_ADDRESS 0xf0003000
-#define FIFO_MAX_LEN 4096
-
-/**
- * fpga_program_dma() - program the DATA-FPGA's using the DMA engine
- * @priv: the driver's private data structure
- *
- * Program the DATA-FPGA's using the Freescale DMA engine. This requires that
- * the engine is programmed such that the hardware DMA request lines can
- * control the entire DMA transaction. The system controller FPGA then
- * completely offloads the programming from the CPU.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static noinline int fpga_program_dma(struct fpga_dev *priv)
-{
- struct dma_chan *chan = priv->chan;
- struct dma_async_tx_descriptor *tx;
- size_t num_pages, len, avail = 0;
- struct dma_slave_config config;
- struct scatterlist *sg;
- struct sg_table table;
- dma_cookie_t cookie;
- int ret, i;
- unsigned long timeout;
-
- /* Disable the programmer */
- fpga_programmer_disable(priv);
-
- /* Allocate a scatterlist for the DMA destination */
- num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN);
- ret = sg_alloc_table(&table, num_pages, GFP_KERNEL);
- if (ret) {
- dev_err(priv->dev, "Unable to allocate dst scatterlist\n");
- ret = -ENOMEM;
- goto out_return;
- }
-
- /*
- * This is an ugly hack
- *
- * We fill in a scatterlist as if it were mapped for DMA. This is
- * necessary because there exists no better structure for this
- * inside the kernel code.
- *
- * As an added bonus, we can use the DMAEngine API for all of this,
- * rather than inventing another extremely similar API.
- */
- avail = priv->bytes;
- for_each_sg(table.sgl, sg, num_pages, i) {
- len = min_t(size_t, avail, FIFO_MAX_LEN);
- sg_dma_address(sg) = FIFO_DMA_ADDRESS;
- sg_dma_len(sg) = len;
-
- avail -= len;
- }
-
- /* Map the buffer for DMA */
- ret = fpga_dma_map(priv);
- if (ret) {
- dev_err(priv->dev, "Unable to map buffer for DMA\n");
- goto out_free_table;
- }
-
- /*
- * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per
- * transaction, and then put it under external control
- */
- memset(&config, 0, sizeof(config));
- config.direction = DMA_MEM_TO_DEV;
- config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
- ret = dmaengine_slave_config(chan, &config);
- if (ret) {
- dev_err(priv->dev, "DMA slave configuration failed\n");
- goto out_dma_unmap;
- }
-
- ret = fsl_dma_external_start(chan, 1);
- if (ret) {
- dev_err(priv->dev, "DMA external control setup failed\n");
- goto out_dma_unmap;
- }
-
- /* setup and submit the DMA transaction */
-
- tx = dmaengine_prep_dma_sg(chan, table.sgl, num_pages,
- priv->sglist, priv->sglen, 0);
- if (!tx) {
- dev_err(priv->dev, "Unable to prep DMA transaction\n");
- ret = -ENOMEM;
- goto out_dma_unmap;
- }
-
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- dev_err(priv->dev, "Unable to submit DMA transaction\n");
- ret = -ENOMEM;
- goto out_dma_unmap;
- }
-
- dma_async_issue_pending(chan);
-
- /* Set the total byte count */
- fpga_set_byte_count(priv->regs, priv->bytes);
- dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
-
- /* Enable the controller for DMA programming */
- fpga_programmer_enable(priv, true);
- dev_dbg(priv->dev, "enabled the controller\n");
-
- /* Wait for the interrupt handler to signal that programming finished */
- timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
- if (!timeout) {
- dev_err(priv->dev, "Timed out waiting for completion\n");
- ret = -ETIMEDOUT;
- goto out_disable_controller;
- }
-
- /* Retrieve the status from the interrupt handler */
- ret = priv->status;
-
-out_disable_controller:
- fpga_programmer_disable(priv);
-out_dma_unmap:
- fpga_dma_unmap(priv);
-out_free_table:
- sg_free_table(&table);
-out_return:
- return ret;
-}
-
-/*
- * Interrupt Handling
- */
-
-static irqreturn_t fpga_irq(int irq, void *dev_id)
-{
- struct fpga_dev *priv = dev_id;
-
- /* Save the status */
- priv->status = fpga_config_error(priv->regs) ? -EIO : 0;
- dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status);
- fpga_dump_registers(priv);
-
- /* Disabling the programmer clears the interrupt */
- fpga_programmer_disable(priv);
-
- /* Notify any waiters */
- complete(&priv->completion);
-
- return IRQ_HANDLED;
-}
-
-/*
- * SYSFS Helpers
- */
-
-/**
- * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's
- * @priv: the driver's private data structure
- *
- * LOCKING: must hold priv->lock
- */
-static int fpga_do_stop(struct fpga_dev *priv)
-{
- u32 val;
-
- /* Set the led to unprogrammed */
- ledtrig_fpga_programmed(false);
-
- /* Pulse the config line to reset the FPGA's */
- val = CFG_CTL_ENABLE | CFG_CTL_RESET;
- iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
- iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
-
- return 0;
-}
-
-static noinline int fpga_do_program(struct fpga_dev *priv)
-{
- int ret;
-
- if (priv->bytes != priv->fw_size) {
- dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, "
- "should be %zu bytes\n",
- priv->bytes, priv->fw_size);
- return -EINVAL;
- }
-
- if (!fpga_power_enabled(priv)) {
- dev_err(priv->dev, "Power not enabled\n");
- return -EINVAL;
- }
-
- if (!fpga_power_good(priv)) {
- dev_err(priv->dev, "Power not good\n");
- return -EINVAL;
- }
-
- /* Set the LED to unprogrammed */
- ledtrig_fpga_programmed(false);
-
- /* Try to program the FPGA's using DMA */
- ret = fpga_program_dma(priv);
-
- /* If DMA failed or doesn't exist, try with CPU */
- if (ret) {
- dev_warn(priv->dev, "Falling back to CPU programming\n");
- ret = fpga_program_cpu(priv);
- }
-
- if (ret) {
- dev_err(priv->dev, "Unable to program FPGA's\n");
- return ret;
- }
-
- /* Drop the firmware bitfile from memory */
- fpga_drop_firmware_data(priv);
-
- dev_dbg(priv->dev, "FPGA programming successful\n");
- ledtrig_fpga_programmed(true);
-
- return 0;
-}
-
-/*
- * File Operations
- */
-
-static int fpga_open(struct inode *inode, struct file *filp)
-{
- /*
- * The miscdevice layer puts our struct miscdevice into the
- * filp->private_data field. We use this to find our private
- * data and then overwrite it with our own private structure.
- */
- struct fpga_dev *priv = container_of(filp->private_data,
- struct fpga_dev, miscdev);
- unsigned int nr_pages;
- int ret;
-
- /* We only allow one process at a time */
- ret = mutex_lock_interruptible(&priv->lock);
- if (ret)
- return ret;
-
- filp->private_data = priv;
- kref_get(&priv->ref);
-
- /* Truncation: drop any existing data */
- if (filp->f_flags & O_TRUNC)
- priv->bytes = 0;
-
- /* Check if we have already allocated a buffer */
- if (priv->buf_allocated)
- return 0;
-
- /* Allocate a buffer to hold enough data for the bitfile */
- nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
- ret = fpga_dma_init(priv, nr_pages);
- if (ret) {
- dev_err(priv->dev, "unable to allocate data buffer\n");
- mutex_unlock(&priv->lock);
- kref_put(&priv->ref, fpga_dev_remove);
- return ret;
- }
-
- priv->buf_allocated = true;
- return 0;
-}
-
-static int fpga_release(struct inode *inode, struct file *filp)
-{
- struct fpga_dev *priv = filp->private_data;
-
- mutex_unlock(&priv->lock);
- kref_put(&priv->ref, fpga_dev_remove);
- return 0;
-}
-
-static ssize_t fpga_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos)
-{
- struct fpga_dev *priv = filp->private_data;
-
- /* FPGA bitfiles have an exact size: disallow anything else */
- if (priv->bytes >= priv->fw_size)
- return -ENOSPC;
-
- count = min_t(size_t, priv->fw_size - priv->bytes, count);
- if (copy_from_user(priv->vaddr + priv->bytes, buf, count))
- return -EFAULT;
-
- priv->bytes += count;
- return count;
-}
-
-static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
- loff_t *f_pos)
-{
- struct fpga_dev *priv = filp->private_data;
- return simple_read_from_buffer(buf, count, f_pos,
- priv->vaddr, priv->bytes);
-}
-
-static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
-{
- struct fpga_dev *priv = filp->private_data;
-
- /* only read-only opens are allowed to seek */
- if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
- return -EINVAL;
-
- return fixed_size_llseek(filp, offset, origin, priv->fw_size);
-}
-
-static const struct file_operations fpga_fops = {
- .open = fpga_open,
- .release = fpga_release,
- .write = fpga_write,
- .read = fpga_read,
- .llseek = fpga_llseek,
-};
-
-/*
- * Device Attributes
- */
-
-static ssize_t pfail_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- u8 val;
-
- val = ioread8(priv->regs + CTL_PWR_FAIL);
- return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val);
-}
-
-static ssize_t pgood_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv));
-}
-
-static ssize_t penable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv));
-}
-
-static ssize_t penable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret)
- return ret;
-
- if (val) {
- ret = fpga_enable_power_supplies(priv);
- if (ret)
- return ret;
- } else {
- fpga_do_stop(priv);
- fpga_disable_power_supplies(priv);
- }
-
- return count;
-}
-
-static ssize_t program_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv));
-}
-
-static ssize_t program_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret)
- return ret;
-
- /* We can't have an image writer and be programming simultaneously */
- if (mutex_lock_interruptible(&priv->lock))
- return -ERESTARTSYS;
-
- /* Program or Reset the FPGA's */
- ret = val ? fpga_do_program(priv) : fpga_do_stop(priv);
- if (ret)
- goto out_unlock;
-
- /* Success */
- ret = count;
-
-out_unlock:
- mutex_unlock(&priv->lock);
- return ret;
-}
-
-static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL);
-static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL);
-static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR,
- penable_show, penable_store);
-
-static DEVICE_ATTR(program, S_IRUGO | S_IWUSR,
- program_show, program_store);
-
-static struct attribute *fpga_attributes[] = {
- &dev_attr_power_fail.attr,
- &dev_attr_power_good.attr,
- &dev_attr_power_enable.attr,
- &dev_attr_program.attr,
- NULL,
-};
-
-static const struct attribute_group fpga_attr_group = {
- .attrs = fpga_attributes,
-};
-
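
Taken together, the file operations and sysfs attributes above define the userspace flow for this driver: enable power, write the complete bitfile to the misc device, close it, then trigger programming. A minimal sketch of that flow follows (error handling omitted; the /dev and /sys paths are inferred from drv_name and the misc class, not stated in this diff):

/* Hypothetical userspace sketch of the CARMA FPGA programming flow. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[65536];
	ssize_t n;
	int fd, fw;

	/* 1. Enable the FPGA power supplies (sysfs path assumed) */
	fd = open("/sys/class/misc/carma-fpga-program/power_enable", O_WRONLY);
	write(fd, "1", 1);
	close(fd);

	/* 2. Write the complete bitfile; fpga_do_program() rejects any
	 *    size other than priv->fw_size */
	fw = open("bitfile.rbf", O_RDONLY);
	fd = open("/dev/carma-fpga-program", O_WRONLY | O_TRUNC);
	while ((n = read(fw, buf, sizeof(buf))) > 0)
		write(fd, buf, n);
	close(fd);		/* releases priv->lock for program_store() */
	close(fw);

	/* 3. Trigger programming (DMA first, CPU fallback on failure) */
	fd = open("/sys/class/misc/carma-fpga-program/program", O_WRONLY);
	write(fd, "1", 1);
	close(fd);
	return 0;
}

Note the close() before step 3: fpga_open() holds priv->lock for the lifetime of the writer, and program_store() takes the same lock, so programming cannot start while the bitfile is still open for writing.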
-/*
- * OpenFirmware Device Subsystem
- */
-
-#define SYS_REG_VERSION 0x00
-#define SYS_REG_GEOGRAPHIC 0x10
-
-static bool dma_filter(struct dma_chan *chan, void *data)
-{
- /*
- * DMA Channel #0 is the only acceptable device
- *
- * This probably won't survive an unload/load cycle of the Freescale
- * DMAEngine driver, but that won't be a problem
- */
- return chan->chan_id == 0 && chan->device->dev_id == 0;
-}
-
-static int fpga_of_remove(struct platform_device *op)
-{
- struct fpga_dev *priv = platform_get_drvdata(op);
- struct device *this_device = priv->miscdev.this_device;
-
- sysfs_remove_group(&this_device->kobj, &fpga_attr_group);
- misc_deregister(&priv->miscdev);
-
- free_irq(priv->irq, priv);
- irq_dispose_mapping(priv->irq);
-
- /* make sure the power supplies are off */
- fpga_disable_power_supplies(priv);
-
- /* unmap registers */
- iounmap(priv->immr);
- iounmap(priv->regs);
-
- dma_release_channel(priv->chan);
-
- /* drop our reference to the private data structure */
- kref_put(&priv->ref, fpga_dev_remove);
- return 0;
-}
-
-/* CTL-CPLD Version Register */
-#define CTL_CPLD_VERSION 0x2000
-
-static int fpga_of_probe(struct platform_device *op)
-{
- struct device_node *of_node = op->dev.of_node;
- struct device *this_device;
- struct fpga_dev *priv;
- dma_cap_mask_t mask;
- u32 ver;
- int ret;
-
- /* Allocate private data */
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&op->dev, "Unable to allocate private data\n");
- ret = -ENOMEM;
- goto out_return;
- }
-
- /* Setup the miscdevice */
- priv->miscdev.minor = MISC_DYNAMIC_MINOR;
- priv->miscdev.name = drv_name;
- priv->miscdev.fops = &fpga_fops;
-
- kref_init(&priv->ref);
-
- platform_set_drvdata(op, priv);
- priv->dev = &op->dev;
- mutex_init(&priv->lock);
- init_completion(&priv->completion);
-
- dev_set_drvdata(priv->dev, priv);
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_cap_set(DMA_SG, mask);
-
- /* Get control of DMA channel #0 */
- priv->chan = dma_request_channel(mask, dma_filter, NULL);
- if (!priv->chan) {
- dev_err(&op->dev, "Unable to acquire DMA channel #0\n");
- ret = -ENODEV;
- goto out_free_priv;
- }
-
- /* Remap the registers for use */
- priv->regs = of_iomap(of_node, 0);
- if (!priv->regs) {
- dev_err(&op->dev, "Unable to ioremap registers\n");
- ret = -ENOMEM;
- goto out_dma_release_channel;
- }
-
- /* Remap the IMMR for use */
- priv->immr = ioremap(get_immrbase(), 0x100000);
- if (!priv->immr) {
- dev_err(&op->dev, "Unable to ioremap IMMR\n");
- ret = -ENOMEM;
- goto out_unmap_regs;
- }
-
- /*
- * Check that external DMA is configured
- *
- * U-Boot does this for us, but we should check it and bail out if
- * there is a problem. Failing to have this register setup correctly
- * will cause the DMA controller to transfer a single cacheline
- * worth of data, then wedge itself.
- */
- if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) {
- dev_err(&op->dev, "External DMA control not configured\n");
- ret = -ENODEV;
- goto out_unmap_immr;
- }
-
- /*
- * Check the CTL-CPLD version
- *
- * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we
- * don't want to run on any version of the CTL-CPLD that does not use
- * a compatible register layout.
- *
- * v2: changed register layout, added power sequencer
- * v3: added glitch filter on the i2c overcurrent/overtemp outputs
- */
- ver = ioread8(priv->regs + CTL_CPLD_VERSION);
- if (ver != 0x02 && ver != 0x03) {
- dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n");
- ret = -ENODEV;
- goto out_unmap_immr;
- }
-
- /* Set the exact size that the firmware image should be */
- ver = ioread32be(priv->regs + SYS_REG_VERSION);
- priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90;
-
- /* Find the correct IRQ number */
- priv->irq = irq_of_parse_and_map(of_node, 0);
- if (priv->irq == NO_IRQ) {
- dev_err(&op->dev, "Unable to find IRQ line\n");
- ret = -ENODEV;
- goto out_unmap_immr;
- }
-
- /* Request the IRQ */
- ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv);
- if (ret) {
- dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq);
- ret = -ENODEV;
- goto out_irq_dispose_mapping;
- }
-
- /* Reset and stop the FPGA's, just in case */
- fpga_do_stop(priv);
-
- /* Register the miscdevice */
- ret = misc_register(&priv->miscdev);
- if (ret) {
- dev_err(&op->dev, "Unable to register miscdevice\n");
- goto out_free_irq;
- }
-
- /* Create the sysfs files */
- this_device = priv->miscdev.this_device;
- dev_set_drvdata(this_device, priv);
- ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group);
- if (ret) {
- dev_err(&op->dev, "Unable to create sysfs files\n");
- goto out_misc_deregister;
- }
-
- dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n",
- (ver & (1 << 17)) ? "Correlator" : "Digitizer",
- (ver & (1 << 16)) ? "B" : "A",
- (ver & (1 << 18)) ? "EP2S130" : "EP2S90");
-
- return 0;
-
-out_misc_deregister:
- misc_deregister(&priv->miscdev);
-out_free_irq:
- free_irq(priv->irq, priv);
-out_irq_dispose_mapping:
- irq_dispose_mapping(priv->irq);
-out_unmap_immr:
- iounmap(priv->immr);
-out_unmap_regs:
- iounmap(priv->regs);
-out_dma_release_channel:
- dma_release_channel(priv->chan);
-out_free_priv:
- kref_put(&priv->ref, fpga_dev_remove);
-out_return:
- return ret;
-}
-
-static const struct of_device_id fpga_of_match[] = {
- { .compatible = "carma,fpga-programmer", },
- {},
-};
-
-static struct platform_driver fpga_of_driver = {
- .probe = fpga_of_probe,
- .remove = fpga_of_remove,
- .driver = {
- .name = drv_name,
- .of_match_table = fpga_of_match,
- },
-};
-
-/*
- * Module Init / Exit
- */
-
-static int __init fpga_init(void)
-{
- led_trigger_register_simple("fpga", &ledtrig_fpga);
- return platform_driver_register(&fpga_of_driver);
-}
-
-static void __exit fpga_exit(void)
-{
- platform_driver_unregister(&fpga_of_driver);
- led_trigger_unregister_simple(ledtrig_fpga);
-}
-
-MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
-MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer");
-MODULE_LICENSE("GPL");
-
-module_init(fpga_init);
-module_exit(fpga_exit);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
deleted file mode 100644
index 5aba3fd789de..000000000000
--- a/drivers/misc/carma/carma-fpga.c
+++ /dev/null
@@ -1,1507 +0,0 @@
-/*
- * CARMA DATA-FPGA Access Driver
- *
- * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/*
- * FPGA Memory Dump Format
- *
- * FPGA #0 control registers (32 x 32-bit words)
- * FPGA #1 control registers (32 x 32-bit words)
- * FPGA #2 control registers (32 x 32-bit words)
- * FPGA #3 control registers (32 x 32-bit words)
- * SYSFPGA control registers (32 x 32-bit words)
- * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
- * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
- * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
- * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
- *
- * Each correlation array consists of:
- *
- * Correlation Data (2 x NUM_LAGSn x 32-bit words)
- * Pipeline Metadata (2 x NUM_METAn x 32-bit words)
- * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
- *
- * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
- * the FPGA configuration registers. They do not change once the FPGAs
- * have been programmed; they only change on re-programming.
- */
-
-/*
- * Basic Description:
- *
- * This driver is used to capture correlation spectra off of the four data
- * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore
- * this driver supports dynamic enable/disable of capture while the device
- * remains open.
- *
- * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
- * capture rate, all buffers are pre-allocated to avoid any potentially long
- * running memory allocations while capturing.
- *
- * There are two lists and one pointer which are used to keep track of the
- * different states of data buffers.
- *
- * 1) free list
- * This list holds all empty data buffers which are ready to receive data.
- *
- * 2) inflight pointer
- * This pointer holds the currently inflight data buffer. This buffer is having
- * data copied into it by the DMA engine.
- *
- * 3) used list
- * This list holds data buffers which have been filled, and are waiting to be
- * read by userspace.
- *
- * All buffers start life on the free list, then move successively to the
- * inflight pointer, and then to the used list. After they have been read by
- * userspace, they are moved back to the free list. The cycle repeats as long
- * as necessary.
- *
- * It should be noted that all buffers are mapped and ready for DMA when they
- * are on any of the three lists. They are only unmapped when they are in the
- * process of being read by userspace.
- */
-
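The three-stage lifecycle described above boils down to a handful of list operations, spread across data_irq(), data_dma_cb(), and the userspace read path later in this file. A condensed sketch of just the transitions (illustrative; the surrounding locking and error paths are shown in the real functions below):

/* free -> inflight: claimed in the interrupt handler for DMA */
buf = list_first_entry(&priv->free, struct data_buf, entry);
list_del_init(&buf->entry);
priv->inflight = buf;

/* inflight -> used: moved in the DMA completion callback */
list_move_tail(&priv->inflight->entry, &priv->used);
priv->inflight = NULL;
wake_up(&priv->wait);

/* used -> free: recycled after userspace has read the buffer
 * (the read path itself is outside this excerpt) */
list_add_tail(&buf->entry, &priv->free);
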
-/*
- * Notes on the IRQ masking scheme:
- *
- * The IRQ masking scheme here is different than most other hardware. The only
- * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
- * the data is if the status registers are not cleared before the next
- * correlation data dump is ready.
- *
- * The interrupt line is connected to the status registers, such that when they
- * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
- * to schedule a long-running DMA operation and return from the interrupt
- * handler quickly, but we cannot clear the status registers.
- *
- * To handle this, the system controller FPGA has the capability to connect the
- * interrupt line to a user-controlled GPIO pin. This pin is driven high
- * (unasserted) and left that way. To mask the interrupt, we change the
- * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
- */
-
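Concretely, both operations are a single write to the system controller's IRQ source register. The full data_disable_interrupts() and data_enable_interrupts() implementations appear further down; the core of the trick is just (register values taken from those functions):

/* Mask: route the IRQ line to the benign, driven-high GPIO source */
iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);

/* Unmask: clear each FPGA's status register (which would otherwise
 * re-assert the line), then route back to the external source */
fpga_write_reg(priv, i, MMAP_REG_STATUS, 0x0);	/* for each of the 4 FPGAs */
iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
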
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/dma-mapping.h>
-#include <linux/miscdevice.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/seq_file.h>
-#include <linux/highmem.h>
-#include <linux/debugfs.h>
-#include <linux/vmalloc.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/kref.h>
-#include <linux/io.h>
-
-/* system controller registers */
-#define SYS_IRQ_SOURCE_CTL 0x24
-#define SYS_IRQ_OUTPUT_EN 0x28
-#define SYS_IRQ_OUTPUT_DATA 0x2C
-#define SYS_IRQ_INPUT_DATA 0x30
-#define SYS_FPGA_CONFIG_STATUS 0x44
-
-/* GPIO IRQ line assignment */
-#define IRQ_CORL_DONE 0x10
-
-/* FPGA registers */
-#define MMAP_REG_VERSION 0x00
-#define MMAP_REG_CORL_CONF1 0x08
-#define MMAP_REG_CORL_CONF2 0x0C
-#define MMAP_REG_STATUS 0x48
-
-#define SYS_FPGA_BLOCK 0xF0000000
-
-#define DATA_FPGA_START 0x400000
-#define DATA_FPGA_SIZE 0x80000
-
-static const char drv_name[] = "carma-fpga";
-
-#define NUM_FPGA 4
-
-#define MIN_DATA_BUFS 8
-#define MAX_DATA_BUFS 64
-
-struct fpga_info {
- unsigned int num_lag_ram;
- unsigned int blk_size;
-};
-
-struct data_buf {
- struct list_head entry;
- void *vaddr;
- struct scatterlist *sglist;
- int sglen;
- int nr_pages;
- size_t size;
-};
-
-struct fpga_device {
- /* character device */
- struct miscdevice miscdev;
- struct device *dev;
- struct mutex mutex;
-
- /* reference count */
- struct kref ref;
-
- /* FPGA registers and information */
- struct fpga_info info[NUM_FPGA];
- void __iomem *regs;
- int irq;
-
- /* FPGA Physical Address/Size Information */
- resource_size_t phys_addr;
- size_t phys_size;
-
- /* DMA structures */
- struct sg_table corl_table;
- unsigned int corl_nents;
- struct dma_chan *chan;
-
- /* Protection for all members below */
- spinlock_t lock;
-
- /* Device enable/disable flag */
- bool enabled;
-
- /* Correlation data buffers */
- wait_queue_head_t wait;
- struct list_head free;
- struct list_head used;
- struct data_buf *inflight;
-
- /* Information about data buffers */
- unsigned int num_dropped;
- unsigned int num_buffers;
- size_t bufsize;
- struct dentry *dbg_entry;
-};
-
-struct fpga_reader {
- struct fpga_device *priv;
- struct data_buf *buf;
- off_t buf_start;
-};
-
-static void fpga_device_release(struct kref *ref)
-{
- struct fpga_device *priv = container_of(ref, struct fpga_device, ref);
-
- /* the last reader has exited, cleanup the last bits */
- mutex_destroy(&priv->mutex);
- kfree(priv);
-}
-
-/*
- * Data Buffer Allocation Helpers
- */
-
-static int carma_dma_init(struct data_buf *buf, int nr_pages)
-{
- struct page *pg;
- int i;
-
- buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
- if (NULL == buf->vaddr) {
- pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
- return -ENOMEM;
- }
-
- pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
- (unsigned long)buf->vaddr,
- nr_pages << PAGE_SHIFT);
-
- memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
- buf->nr_pages = nr_pages;
-
- buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
- if (NULL == buf->sglist)
- goto vzalloc_err;
-
- sg_init_table(buf->sglist, buf->nr_pages);
- for (i = 0; i < buf->nr_pages; i++) {
- pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
- if (NULL == pg)
- goto vmalloc_to_page_err;
- sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
- }
- return 0;
-
-vmalloc_to_page_err:
- vfree(buf->sglist);
- buf->sglist = NULL;
-vzalloc_err:
- vfree(buf->vaddr);
- buf->vaddr = NULL;
- return -ENOMEM;
-}
-
-static int carma_dma_map(struct device *dev, struct data_buf *buf)
-{
- buf->sglen = dma_map_sg(dev, buf->sglist,
- buf->nr_pages, DMA_FROM_DEVICE);
-
- if (0 == buf->sglen) {
- pr_warn("%s: dma_map_sg failed\n", __func__);
- return -ENOMEM;
- }
- return 0;
-}
-
-static int carma_dma_unmap(struct device *dev, struct data_buf *buf)
-{
- if (!buf->sglen)
- return 0;
-
- dma_unmap_sg(dev, buf->sglist, buf->sglen, DMA_FROM_DEVICE);
- buf->sglen = 0;
- return 0;
-}
-
-/**
- * data_free_buffer() - free a single data buffer and all allocated memory
- * @buf: the buffer to free
- *
- * This will free all of the pages allocated to the given data buffer, and
- * then free the structure itself
- */
-static void data_free_buffer(struct data_buf *buf)
-{
- /* It is ok to free a NULL buffer */
- if (!buf)
- return;
-
- /* free all memory */
- vfree(buf->sglist);
- vfree(buf->vaddr);
- kfree(buf);
-}
-
-/**
- * data_alloc_buffer() - allocate and fill a data buffer with pages
- * @bytes: the number of bytes required
- *
- * This allocates all space needed for a data buffer. It must be mapped before
- * use in a DMA transaction using carma_dma_map().
- *
- * Returns NULL on failure
- */
-static struct data_buf *data_alloc_buffer(const size_t bytes)
-{
- unsigned int nr_pages;
- struct data_buf *buf;
- int ret;
-
- /* calculate the number of pages necessary */
- nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
-
- /* allocate the buffer structure */
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- goto out_return;
-
- /* initialize internal fields */
- INIT_LIST_HEAD(&buf->entry);
- buf->size = bytes;
-
- /* allocate the buffer */
- ret = carma_dma_init(buf, nr_pages);
- if (ret)
- goto out_free_buf;
-
- return buf;
-
-out_free_buf:
- kfree(buf);
-out_return:
- return NULL;
-}
-
-/**
- * data_free_buffers() - free all allocated buffers
- * @priv: the driver's private data structure
- *
- * Free all buffers allocated by the driver (except those currently in the
- * process of being read by userspace).
- *
- * LOCKING: must hold dev->mutex
- * CONTEXT: user
- */
-static void data_free_buffers(struct fpga_device *priv)
-{
- struct data_buf *buf, *tmp;
-
- /* the device should be stopped, no DMA in progress */
- BUG_ON(priv->inflight != NULL);
-
- list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
- list_del_init(&buf->entry);
- carma_dma_unmap(priv->dev, buf);
- data_free_buffer(buf);
- }
-
- list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
- list_del_init(&buf->entry);
- carma_dma_unmap(priv->dev, buf);
- data_free_buffer(buf);
- }
-
- priv->num_buffers = 0;
- priv->bufsize = 0;
-}
-
-/**
- * data_alloc_buffers() - allocate one second's worth of data buffers
- * @priv: the driver's private data structure
- *
- * Allocate enough buffers for a whole second's worth of data
- *
- * This routine will attempt to degrade nicely by succeeding even if a full
- * second worth of data buffers could not be allocated, as long as a minimum
- * number were allocated. In this case, it will print a message to the kernel
- * log.
- *
- * The device must not be modifying any lists when this is called.
- *
- * CONTEXT: user
- * LOCKING: must hold dev->mutex
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_alloc_buffers(struct fpga_device *priv)
-{
- struct data_buf *buf;
- int i, ret;
-
- for (i = 0; i < MAX_DATA_BUFS; i++) {
-
- /* allocate a buffer */
- buf = data_alloc_buffer(priv->bufsize);
- if (!buf)
- break;
-
- /* map it for DMA */
- ret = carma_dma_map(priv->dev, buf);
- if (ret) {
- data_free_buffer(buf);
- break;
- }
-
- /* add it to the list of free buffers */
- list_add_tail(&buf->entry, &priv->free);
- priv->num_buffers++;
- }
-
- /* Make sure we allocated the minimum required number of buffers */
- if (priv->num_buffers < MIN_DATA_BUFS) {
- dev_err(priv->dev, "Unable to allocate enough data buffers\n");
- data_free_buffers(priv);
- return -ENOMEM;
- }
-
- /* Warn if we are running in a degraded state, but do not fail */
- if (priv->num_buffers < MAX_DATA_BUFS) {
- dev_warn(priv->dev,
- "Unable to allocate %d buffers, using %d buffers instead\n",
- MAX_DATA_BUFS, i);
- }
-
- return 0;
-}
-
-/*
- * DMA Operations Helpers
- */
-
-/**
- * fpga_start_addr() - get the physical address of a DATA-FPGA
- * @priv: the driver's private data structure
- * @fpga: the DATA-FPGA number (zero based)
- */
-static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
-{
- return priv->phys_addr + 0x400000 + (0x80000 * fpga);
-}
-
-/**
- * fpga_block_addr() - get the physical address of a correlation data block
- * @priv: the driver's private data structure
- * @fpga: the DATA-FPGA number (zero based)
- * @blknum: the correlation block number (zero based)
- */
-static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
- unsigned int blknum)
-{
- return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
-}
-
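As a worked example of this address arithmetic (the base address is made up; only the offsets come from the code above):

/*
 * Assume phys_addr = 0xc0000000. Then for FPGA #2, block #3:
 *
 *   fpga_start_addr(priv, 2)    = 0xc0000000 + 0x400000 + (0x80000 * 2)
 *                               = 0xc0500000
 *   fpga_block_addr(priv, 2, 3) = 0xc0500000 + (0x10000 * (1 + 3))
 *                               = 0xc0540000
 */
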
-#define REG_BLOCK_SIZE (32 * 4)
-
-/**
- * data_setup_corl_table() - create the scatterlist for correlation dumps
- * @priv: the driver's private data structure
- *
- * Create the scatterlist for transferring a correlation dump from the
- * DATA FPGAs. This structure will be reused for each buffer that needs
- * to be filled with correlation data.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_setup_corl_table(struct fpga_device *priv)
-{
- struct sg_table *table = &priv->corl_table;
- struct scatterlist *sg;
- struct fpga_info *info;
- int i, j, ret;
-
- /* Calculate the number of entries needed */
- priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
- for (i = 0; i < NUM_FPGA; i++)
- priv->corl_nents += priv->info[i].num_lag_ram;
-
- /* Allocate the scatterlist table */
- ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
- if (ret) {
- dev_err(priv->dev, "unable to allocate DMA table\n");
- return ret;
- }
-
- /* Add the DATA FPGA registers to the scatterlist */
- sg = table->sgl;
- for (i = 0; i < NUM_FPGA; i++) {
- sg_dma_address(sg) = fpga_start_addr(priv, i);
- sg_dma_len(sg) = REG_BLOCK_SIZE;
- sg = sg_next(sg);
- }
-
- /* Add the SYS-FPGA registers to the scatterlist */
- sg_dma_address(sg) = SYS_FPGA_BLOCK;
- sg_dma_len(sg) = REG_BLOCK_SIZE;
- sg = sg_next(sg);
-
- /* Add the FPGA correlation data blocks to the scatterlist */
- for (i = 0; i < NUM_FPGA; i++) {
- info = &priv->info[i];
- for (j = 0; j < info->num_lag_ram; j++) {
- sg_dma_address(sg) = fpga_block_addr(priv, i, j);
- sg_dma_len(sg) = info->blk_size;
- sg = sg_next(sg);
- }
- }
-
- /*
- * All physical addresses and lengths are present in the structure
- * now. It can be reused for every FPGA DATA interrupt
- */
- return 0;
-}
-
-/*
- * FPGA Register Access Helpers
- */
-
-static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
- unsigned int reg, u32 val)
-{
- const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
- iowrite32be(val, priv->regs + fpga_start + reg);
-}
-
-static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
- unsigned int reg)
-{
- const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
- return ioread32be(priv->regs + fpga_start + reg);
-}
-
-/**
- * data_calculate_bufsize() - calculate the data buffer size required
- * @priv: the driver's private data structure
- *
- * Calculate the total buffer size needed to hold a single block
- * of correlation data
- *
- * CONTEXT: user
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_calculate_bufsize(struct fpga_device *priv)
-{
- u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
- u32 conf1, conf2, version;
- u32 num_lag_ram, blk_size;
- int i;
-
- /* Each buffer starts with the 5 FPGA register areas */
- priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
-
- /* Read and store the configuration data for each FPGA */
- for (i = 0; i < NUM_FPGA; i++) {
- version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
- conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
- conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);
-
- /* minor version 2 and later */
- if ((version & 0x000000FF) >= 2) {
- num_corl = (conf1 & 0x000000F0) >> 4;
- num_pack = (conf1 & 0x00000F00) >> 8;
- num_lags = (conf1 & 0x00FFF000) >> 12;
- num_meta = (conf1 & 0x7F000000) >> 24;
- num_qcnt = (conf2 & 0x00000FFF) >> 0;
- } else {
- num_corl = (conf1 & 0x000000F0) >> 4;
- num_pack = 1; /* implied */
- num_lags = (conf1 & 0x000FFF00) >> 8;
- num_meta = (conf1 & 0x7FF00000) >> 20;
- num_qcnt = (conf2 & 0x00000FFF) >> 0;
- }
-
- num_lag_ram = (num_corl + num_pack - 1) / num_pack;
- blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;
-
- priv->info[i].num_lag_ram = num_lag_ram;
- priv->info[i].blk_size = blk_size;
- priv->bufsize += num_lag_ram * blk_size;
-
- dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
- dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
- dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
- dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
- dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
- dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
- }
-
- dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
- return 0;
-}
-
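To make the size formula concrete, here is a worked example with invented configuration values (NUM_CORL=8, NUM_PACK=2, NUM_LAGS=32, NUM_META=16, NUM_QCNT=64 on all four FPGAs; real hardware will differ):

/*
 * num_lag_ram = (8 + 2 - 1) / 2          = 4    (integer division)
 * blk_size    = ((2 * 32) + 16 + 64) * 8 = 1152 bytes
 *
 * bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE + NUM_FPGA * (4 * 1152)
 *         = 5 * 128 + 4 * 4608
 *         = 19072 bytes per correlation dump
 */
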
-/*
- * Interrupt Handling
- */
-
-/**
- * data_disable_interrupts() - stop the device from generating interrupts
- * @priv: the driver's private data structure
- *
- * Hide interrupts by switching to GPIO interrupt source
- *
- * LOCKING: must hold dev->lock
- */
-static void data_disable_interrupts(struct fpga_device *priv)
-{
- /* hide the interrupt by switching the IRQ driver to GPIO */
- iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
-}
-
-/**
- * data_enable_interrupts() - allow the device to generate interrupts
- * @priv: the driver's private data structure
- *
- * Unhide interrupts by switching to the FPGA interrupt source. At the
- * same time, clear the DATA-FPGA status registers.
- *
- * LOCKING: must hold dev->lock
- */
-static void data_enable_interrupts(struct fpga_device *priv)
-{
- /* clear the actual FPGA corl_done interrupt */
- fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
- fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
- fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
- fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);
-
- /* flush the writes */
- fpga_read_reg(priv, 0, MMAP_REG_STATUS);
- fpga_read_reg(priv, 1, MMAP_REG_STATUS);
- fpga_read_reg(priv, 2, MMAP_REG_STATUS);
- fpga_read_reg(priv, 3, MMAP_REG_STATUS);
-
- /* switch back to the external interrupt source */
- iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
-}
-
-/**
- * data_dma_cb() - DMAEngine callback for DMA completion
- * @data: the driver's private data structure
- *
- * Complete a DMA transfer from the DATA-FPGA's
- *
- * This is called via the DMA callback mechanism, and will handle moving the
- * completed DMA transaction to the used list, and then wake any processes
- * waiting for new data
- *
- * CONTEXT: any, softirq expected
- */
-static void data_dma_cb(void *data)
-{
- struct fpga_device *priv = data;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* If there is no inflight buffer, we've got a bug */
- BUG_ON(priv->inflight == NULL);
-
- /* Move the inflight buffer onto the used list */
- list_move_tail(&priv->inflight->entry, &priv->used);
- priv->inflight = NULL;
-
- /*
- * If data dumping is still enabled, then clear the FPGA
- * status registers and re-enable FPGA interrupts
- */
- if (priv->enabled)
- data_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * We've changed both the inflight and used lists, so we need
- * to wake up any processes that are blocking for those events
- */
- wake_up(&priv->wait);
-}
-
-/**
- * data_submit_dma() - prepare and submit the required DMA to fill a buffer
- * @priv: the driver's private data structure
- * @buf: the data buffer
- *
- * Prepare and submit the necessary DMA transactions to fill a correlation
- * data buffer.
- *
- * LOCKING: must hold dev->lock
- * CONTEXT: hardirq only
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
-{
- struct scatterlist *dst_sg, *src_sg;
- unsigned int dst_nents, src_nents;
- struct dma_chan *chan = priv->chan;
- struct dma_async_tx_descriptor *tx;
- dma_cookie_t cookie;
- dma_addr_t dst, src;
- unsigned long dma_flags = 0;
-
- dst_sg = buf->sglist;
- dst_nents = buf->sglen;
-
- src_sg = priv->corl_table.sgl;
- src_nents = priv->corl_nents;
-
- /*
- * All buffers passed to this function should be ready and mapped
- * for DMA already. Therefore, we don't need to do anything except
- * submit it to the Freescale DMA Engine for processing
- */
-
- /* setup the scatterlist to scatterlist transfer */
- tx = chan->device->device_prep_dma_sg(chan,
- dst_sg, dst_nents,
- src_sg, src_nents,
- 0);
- if (!tx) {
- dev_err(priv->dev, "unable to prep scatterlist DMA\n");
- return -ENOMEM;
- }
-
- /* submit the transaction to the DMA controller */
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- dev_err(priv->dev, "unable to submit scatterlist DMA\n");
- return -ENOMEM;
- }
-
- /* Prepare the re-read of the SYS-FPGA block */
- dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
- src = SYS_FPGA_BLOCK;
- tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
- REG_BLOCK_SIZE,
- dma_flags);
- if (!tx) {
- dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
- return -ENOMEM;
- }
-
- /* Setup the callback */
- tx->callback = data_dma_cb;
- tx->callback_param = priv;
-
- /* submit the transaction to the DMA controller */
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
- return -ENOMEM;
- }
-
- return 0;
-}
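For reference, the canonical dmaengine submit sequence used here, condensed into a standalone sketch (prep, attach callback, submit, issue); submit_memcpy is a hypothetical helper:

#include <linux/dmaengine.h>

static int submit_memcpy(struct dma_chan *chan, dma_addr_t dst,
                         dma_addr_t src, size_t len,
                         dma_async_tx_callback cb, void *cb_param)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        /* 1) prepare a descriptor on the channel */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
        if (!tx)
                return -ENOMEM;

        /* 2) attach the completion callback before submitting */
        tx->callback = cb;
        tx->callback_param = cb_param;

        /* 3) queue the descriptor on the channel's pending list */
        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie))
                return -ENOMEM;

        /* 4) kick the engine; the callback fires on completion */
        dma_async_issue_pending(chan);
        return 0;
}

Note that data_submit_dma() intentionally stops short of step 4: data_irq() below issues the pending work once, so the scatterlist copy and the SYS-FPGA re-read start as one batch.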
-
-#define CORL_DONE 0x1
-#define CORL_ERR 0x2
-
-static irqreturn_t data_irq(int irq, void *dev_id)
-{
- struct fpga_device *priv = dev_id;
- bool submitted = false;
- struct data_buf *buf;
- u32 status;
- int i;
-
- /* detect spurious interrupts via FPGA status */
- for (i = 0; i < 4; i++) {
- status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
- if (!(status & (CORL_DONE | CORL_ERR))) {
- dev_err(priv->dev, "spurious irq detected (FPGA)\n");
- return IRQ_NONE;
- }
- }
-
- /* detect spurious interrupts via raw IRQ pin readback */
- status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
- if (status & IRQ_CORL_DONE) {
- dev_err(priv->dev, "spurious irq detected (IRQ)\n");
- return IRQ_NONE;
- }
-
- spin_lock(&priv->lock);
-
- /*
- * This is an error case that should never happen.
- *
- * If this driver has a bug and manages to re-enable interrupts while
- * a DMA is in progress, then we will hit this statement and should
- * start paying attention immediately.
- */
- BUG_ON(priv->inflight != NULL);
-
- /* hide the interrupt by switching the IRQ driver to GPIO */
- data_disable_interrupts(priv);
-
- /* If there are no free buffers, drop this data */
- if (list_empty(&priv->free)) {
- priv->num_dropped++;
- goto out;
- }
-
- buf = list_first_entry(&priv->free, struct data_buf, entry);
- list_del_init(&buf->entry);
- BUG_ON(buf->size != priv->bufsize);
-
- /* Submit a DMA transfer to get the correlation data */
- if (data_submit_dma(priv, buf)) {
- dev_err(priv->dev, "Unable to setup DMA transfer\n");
- list_move_tail(&buf->entry, &priv->free);
- goto out;
- }
-
- /* Save the buffer for the DMA callback */
- priv->inflight = buf;
- submitted = true;
-
- /* Start the DMA Engine */
- dma_async_issue_pending(priv->chan);
-
-out:
- /* If no DMA was submitted, re-enable interrupts */
- if (!submitted)
- data_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-}
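The spurious-interrupt checks above implement the IRQF_SHARED contract: on a shared line, read back device state first and return IRQ_NONE when some other device raised the interrupt. A minimal sketch with a hypothetical status register layout:

#include <linux/interrupt.h>
#include <linux/io.h>

struct my_dev {                         /* hypothetical device context */
        void __iomem *regs;
};

static irqreturn_t my_irq(int irq, void *dev_id)
{
        struct my_dev *priv = dev_id;
        u32 status = ioread32(priv->regs);      /* hypothetical status reg */

        /* claim the interrupt only if our device actually raised it */
        if (!(status & 0x1))
                return IRQ_NONE;

        iowrite32(0x1, priv->regs);             /* hypothetical ack */
        return IRQ_HANDLED;
}

/* hookup, as in data_device_enable() below:
 *      request_irq(irq, my_irq, IRQF_SHARED, "mydrv", priv);
 */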
-
-/*
- * Realtime Device Enable Helpers
- */
-
-/**
- * data_device_enable() - enable the device for buffered dumping
- * @priv: the driver's private data structure
- *
- * Enable the device for buffered dumping. Allocates buffers and hooks up
- * the interrupt handler. When this finishes, data will come pouring in.
- *
- * LOCKING: must hold dev->mutex
- * CONTEXT: user context only
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_device_enable(struct fpga_device *priv)
-{
- bool enabled;
- u32 val;
- int ret;
-
- /* multiple enables are safe: they do nothing */
- spin_lock_irq(&priv->lock);
- enabled = priv->enabled;
- spin_unlock_irq(&priv->lock);
- if (enabled)
- return 0;
-
- /* check that the FPGAs are programmed */
- val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
- if (!(val & (1 << 18))) {
- dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
- return -ENODATA;
- }
-
- /* read the FPGAs to calculate the buffer size */
- ret = data_calculate_bufsize(priv);
- if (ret) {
- dev_err(priv->dev, "unable to calculate buffer size\n");
- goto out_error;
- }
-
- /* allocate the correlation data buffers */
- ret = data_alloc_buffers(priv);
- if (ret) {
- dev_err(priv->dev, "unable to allocate buffers\n");
- goto out_error;
- }
-
- /* setup the source scatterlist for dumping correlation data */
- ret = data_setup_corl_table(priv);
- if (ret) {
- dev_err(priv->dev, "unable to setup correlation DMA table\n");
- goto out_error;
- }
-
- /* prevent the FPGAs from generating interrupts */
- data_disable_interrupts(priv);
-
- /* hookup the irq handler */
- ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
- if (ret) {
- dev_err(priv->dev, "unable to request IRQ handler\n");
- goto out_error;
- }
-
- /* allow the DMA callback to re-enable FPGA interrupts */
- spin_lock_irq(&priv->lock);
- priv->enabled = true;
- spin_unlock_irq(&priv->lock);
-
- /* allow the FPGAs to generate interrupts */
- data_enable_interrupts(priv);
- return 0;
-
-out_error:
- sg_free_table(&priv->corl_table);
- priv->corl_nents = 0;
-
- data_free_buffers(priv);
- return ret;
-}
-
-/**
- * data_device_disable() - disable the device for buffered dumping
- * @priv: the driver's private data structure
- *
- * Disable the device for buffered dumping. Stops new DMA transactions from
- * being generated, waits for all outstanding DMA to complete, and then frees
- * all buffers.
- *
- * LOCKING: must hold dev->mutex
- * CONTEXT: user only
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_device_disable(struct fpga_device *priv)
-{
- spin_lock_irq(&priv->lock);
-
- /* allow multiple disable */
- if (!priv->enabled) {
- spin_unlock_irq(&priv->lock);
- return 0;
- }
-
- /*
- * Mark the device disabled
- *
- * This stops DMA callbacks from re-enabling interrupts
- */
- priv->enabled = false;
-
- /* prevent the FPGAs from generating interrupts */
- data_disable_interrupts(priv);
-
- /* wait until all ongoing DMA has finished */
- while (priv->inflight != NULL) {
- spin_unlock_irq(&priv->lock);
- wait_event(priv->wait, priv->inflight == NULL);
- spin_lock_irq(&priv->lock);
- }
-
- spin_unlock_irq(&priv->lock);
-
- /* unhook the irq handler */
- free_irq(priv->irq, priv);
-
- /* free the correlation table */
- sg_free_table(&priv->corl_table);
- priv->corl_nents = 0;
-
- /* free all buffers: the free and used lists are not being changed */
- data_free_buffers(priv);
- return 0;
-}
-
-/*
- * DEBUGFS Interface
- */
-#ifdef CONFIG_DEBUG_FS
-
-/*
- * Count the number of entries in the given list
- */
-static unsigned int list_num_entries(struct list_head *list)
-{
- struct list_head *entry;
- unsigned int ret = 0;
-
- list_for_each(entry, list)
- ret++;
-
- return ret;
-}
-
-static int data_debug_show(struct seq_file *f, void *offset)
-{
- struct fpga_device *priv = f->private;
-
- spin_lock_irq(&priv->lock);
-
- seq_printf(f, "enabled: %d\n", priv->enabled);
- seq_printf(f, "bufsize: %d\n", priv->bufsize);
- seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
- seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
- seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
- seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
- seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
-
- spin_unlock_irq(&priv->lock);
- return 0;
-}
-
-static int data_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, data_debug_show, inode->i_private);
-}
-
-static const struct file_operations data_debug_fops = {
- .owner = THIS_MODULE,
- .open = data_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int data_debugfs_init(struct fpga_device *priv)
-{
- priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
- &data_debug_fops);
- return PTR_ERR_OR_ZERO(priv->dbg_entry);
-}
-
-static void data_debugfs_exit(struct fpga_device *priv)
-{
- debugfs_remove(priv->dbg_entry);
-}
-
-#else
-
-static inline int data_debugfs_init(struct fpga_device *priv)
-{
- return 0;
-}
-
-static inline void data_debugfs_exit(struct fpga_device *priv)
-{
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
-/*
- * SYSFS Attributes
- */
-
-static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_device *priv = dev_get_drvdata(dev);
- int ret;
-
- spin_lock_irq(&priv->lock);
- ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
- spin_unlock_irq(&priv->lock);
-
- return ret;
-}
-
-static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fpga_device *priv = dev_get_drvdata(dev);
- unsigned long enable;
- int ret;
-
- ret = kstrtoul(buf, 0, &enable);
- if (ret) {
- dev_err(priv->dev, "unable to parse enable input\n");
- return ret;
- }
-
- /* protect against concurrent enable/disable */
- ret = mutex_lock_interruptible(&priv->mutex);
- if (ret)
- return ret;
-
- if (enable)
- ret = data_device_enable(priv);
- else
- ret = data_device_disable(priv);
-
- if (ret) {
- dev_err(priv->dev, "device %s failed\n",
- enable ? "enable" : "disable");
- count = ret;
- goto out_unlock;
- }
-
-out_unlock:
- mutex_unlock(&priv->mutex);
- return count;
-}
-
-static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);
-
-static struct attribute *data_sysfs_attrs[] = {
- &dev_attr_enable.attr,
- NULL,
-};
-
-static const struct attribute_group rt_sysfs_attr_group = {
- .attrs = data_sysfs_attrs,
-};
-
-/*
- * FPGA Realtime Data Character Device
- */
-
-static int data_open(struct inode *inode, struct file *filp)
-{
- /*
- * The miscdevice layer puts our struct miscdevice into the
- * filp->private_data field. We use this to find our private
- * data and then overwrite it with our own private structure.
- */
- struct fpga_device *priv = container_of(filp->private_data,
- struct fpga_device, miscdev);
- struct fpga_reader *reader;
- int ret;
-
- /* allocate private data */
- reader = kzalloc(sizeof(*reader), GFP_KERNEL);
- if (!reader)
- return -ENOMEM;
-
- reader->priv = priv;
- reader->buf = NULL;
-
- filp->private_data = reader;
- ret = nonseekable_open(inode, filp);
- if (ret) {
- dev_err(priv->dev, "nonseekable-open failed\n");
- kfree(reader);
- return ret;
- }
-
- /*
- * success, increase the reference count of the private data structure
- * so that it doesn't disappear if the device is unbound
- */
- kref_get(&priv->ref);
- return 0;
-}
-
-static int data_release(struct inode *inode, struct file *filp)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
-
- /* free the per-reader structure */
- data_free_buffer(reader->buf);
- kfree(reader);
- filp->private_data = NULL;
-
- /* decrement our reference count to the private data */
- kref_put(&priv->ref, fpga_device_release);
- return 0;
-}
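The open()/release() pairing above is the stock kref lifecycle: creation takes the initial reference, every open adds one, and whichever put runs last (a closing reader or the unbind path) triggers the destructor. Condensed, with hypothetical names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
        struct kref ref;
        /* ... device state ... */
};

static void obj_release(struct kref *ref)
{
        struct obj *o = container_of(ref, struct obj, ref);

        kfree(o);       /* runs only when the last reference drops */
}

/* creation:        kref_init(&o->ref);             */
/* open():          kref_get(&o->ref);              */
/* close()/unbind:  kref_put(&o->ref, obj_release); */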
-
-static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
- loff_t *f_pos)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
- struct list_head *used = &priv->used;
- bool drop_buffer = false;
- struct data_buf *dbuf;
- size_t avail;
- void *data;
- int ret;
-
- /* check if we already have a partial buffer */
- if (reader->buf) {
- dbuf = reader->buf;
- goto have_buffer;
- }
-
- spin_lock_irq(&priv->lock);
-
- /* Block until there is at least one buffer on the used list */
- while (list_empty(used)) {
- spin_unlock_irq(&priv->lock);
-
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- ret = wait_event_interruptible(priv->wait, !list_empty(used));
- if (ret)
- return ret;
-
- spin_lock_irq(&priv->lock);
- }
-
- /* Grab the first buffer off of the used list */
- dbuf = list_first_entry(used, struct data_buf, entry);
- list_del_init(&dbuf->entry);
-
- spin_unlock_irq(&priv->lock);
-
- /* Buffers are always mapped: unmap it */
- carma_dma_unmap(priv->dev, dbuf);
-
- /* save the buffer for later */
- reader->buf = dbuf;
- reader->buf_start = 0;
-
-have_buffer:
- /* Get the number of bytes available */
- avail = dbuf->size - reader->buf_start;
- data = dbuf->vaddr + reader->buf_start;
-
- /* Get the number of bytes we can transfer */
- count = min(count, avail);
-
- /* Copy the data to the userspace buffer */
- if (copy_to_user(ubuf, data, count))
- return -EFAULT;
-
- /* Update the amount of available space */
- avail -= count;
-
- /*
- * If there is still some data available, save the buffer for the
- * next userspace call to read() and return
- */
- if (avail > 0) {
- reader->buf_start += count;
- reader->buf = dbuf;
- return count;
- }
-
- /*
- * Get the buffer ready to be reused for DMA
- *
- * If it fails, we pretend that the read never happened and return
- * -EFAULT to userspace. The read will be retried.
- */
- ret = carma_dma_map(priv->dev, dbuf);
- if (ret) {
- dev_err(priv->dev, "unable to remap buffer for DMA\n");
- return -EFAULT;
- }
-
- /* Lock against concurrent enable/disable */
- spin_lock_irq(&priv->lock);
-
- /* the reader is finished with this buffer */
- reader->buf = NULL;
-
- /*
- * One of two things has happened: either the device is disabled, or the
- * device has been reconfigured underneath us. In either case, we
- * should just throw away the buffer.
- *
- * Lockdep complains if this is done under the spinlock, so we
- * handle it during the unlock path.
- */
- if (!priv->enabled || dbuf->size != priv->bufsize) {
- drop_buffer = true;
- goto out_unlock;
- }
-
- /* The buffer is safe to reuse, so add it back to the free list */
- list_add_tail(&dbuf->entry, &priv->free);
-
-out_unlock:
- spin_unlock_irq(&priv->lock);
-
- if (drop_buffer) {
- carma_dma_unmap(priv->dev, dbuf);
- data_free_buffer(dbuf);
- }
-
- return count;
-}
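From userspace the node behaves as a stream of complete correlation buffers; a minimal blocking reader, assuming the device appears as /dev/carma-fpga (the drv_name value is defined outside this hunk):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd;

        fd = open("/dev/carma-fpga", O_RDONLY); /* assumed node name */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* blocking reads; with O_NONBLOCK an empty queue returns EAGAIN */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, (size_t)n, stdout);

        close(fd);
        return 0;
}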
-
-static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
- unsigned int mask = 0;
-
- poll_wait(filp, &priv->wait, tbl);
-
- if (!list_empty(&priv->used))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-
-static int data_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
- unsigned long offset, vsize, psize, addr;
-
- /* VMA properties */
- offset = vma->vm_pgoff << PAGE_SHIFT;
- vsize = vma->vm_end - vma->vm_start;
- psize = priv->phys_size - offset;
- addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
-
- /* Check against the FPGA region's physical memory size */
- if (vsize > psize) {
- dev_err(priv->dev, "requested mmap mapping too large\n");
- return -EINVAL;
- }
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
- vma->vm_page_prot);
-}
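The matching userspace side of data_mmap(): map the FPGA register window through the same node (the driver forces an uncached mapping); the node name and mapping length are assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 4096;      /* must not exceed the FPGA region size */
        volatile uint32_t *regs;
        int fd;

        fd = open("/dev/carma-fpga", O_RDWR);   /* assumed node name */
        if (fd < 0)
                return 1;

        regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (regs == MAP_FAILED)
                return 1;

        printf("reg[0] = 0x%08x\n", (unsigned)regs[0]); /* uncached MMIO read */

        munmap((void *)regs, len);
        close(fd);
        return 0;
}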
-
-static const struct file_operations data_fops = {
- .owner = THIS_MODULE,
- .open = data_open,
- .release = data_release,
- .read = data_read,
- .poll = data_poll,
- .mmap = data_mmap,
- .llseek = no_llseek,
-};
-
-/*
- * OpenFirmware Device Subsystem
- */
-
-static bool dma_filter(struct dma_chan *chan, void *data)
-{
- /*
- * DMA Channel #0 is used for the FPGA Programmer, so ignore it
- *
- * This probably won't survive an unload/load cycle of the Freescale
- * DMAEngine driver, but that won't be a problem
- */
- if (chan->chan_id == 0 && chan->device->dev_id == 0)
- return false;
-
- return true;
-}
-
-static int data_of_probe(struct platform_device *op)
-{
- struct device_node *of_node = op->dev.of_node;
- struct device *this_device;
- struct fpga_device *priv;
- struct resource res;
- dma_cap_mask_t mask;
- int ret;
-
- /* Allocate private data */
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&op->dev, "Unable to allocate device private data\n");
- ret = -ENOMEM;
- goto out_return;
- }
-
- platform_set_drvdata(op, priv);
- priv->dev = &op->dev;
- kref_init(&priv->ref);
- mutex_init(&priv->mutex);
-
- dev_set_drvdata(priv->dev, priv);
- spin_lock_init(&priv->lock);
- INIT_LIST_HEAD(&priv->free);
- INIT_LIST_HEAD(&priv->used);
- init_waitqueue_head(&priv->wait);
-
- /* Setup the misc device */
- priv->miscdev.minor = MISC_DYNAMIC_MINOR;
- priv->miscdev.name = drv_name;
- priv->miscdev.fops = &data_fops;
-
- /* Get the physical address of the FPGA registers */
- ret = of_address_to_resource(of_node, 0, &res);
- if (ret) {
- dev_err(&op->dev, "Unable to find FPGA physical address\n");
- ret = -ENODEV;
- goto out_free_priv;
- }
-
- priv->phys_addr = res.start;
- priv->phys_size = resource_size(&res);
-
- /* ioremap the registers for use */
- priv->regs = of_iomap(of_node, 0);
- if (!priv->regs) {
- dev_err(&op->dev, "Unable to ioremap registers\n");
- ret = -ENOMEM;
- goto out_free_priv;
- }
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
- dma_cap_set(DMA_INTERRUPT, mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_cap_set(DMA_SG, mask);
-
- /* Request a DMA channel */
- priv->chan = dma_request_channel(mask, dma_filter, NULL);
- if (!priv->chan) {
- dev_err(&op->dev, "Unable to request DMA channel\n");
- ret = -ENODEV;
- goto out_unmap_regs;
- }
-
- /* Find the correct IRQ number */
- priv->irq = irq_of_parse_and_map(of_node, 0);
- if (priv->irq == NO_IRQ) {
- dev_err(&op->dev, "Unable to find IRQ line\n");
- ret = -ENODEV;
- goto out_release_dma;
- }
-
- /* Drive the GPIO for FPGA IRQ high (no interrupt) */
- iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);
-
- /* Register the miscdevice */
- ret = misc_register(&priv->miscdev);
- if (ret) {
- dev_err(&op->dev, "Unable to register miscdevice\n");
- goto out_irq_dispose_mapping;
- }
-
- /* Create the debugfs files */
- ret = data_debugfs_init(priv);
- if (ret) {
- dev_err(&op->dev, "Unable to create debugfs files\n");
- goto out_misc_deregister;
- }
-
- /* Create the sysfs files */
- this_device = priv->miscdev.this_device;
- dev_set_drvdata(this_device, priv);
- ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
- if (ret) {
- dev_err(&op->dev, "Unable to create sysfs files\n");
- goto out_data_debugfs_exit;
- }
-
- dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
- return 0;
-
-out_data_debugfs_exit:
- data_debugfs_exit(priv);
-out_misc_deregister:
- misc_deregister(&priv->miscdev);
-out_irq_dispose_mapping:
- irq_dispose_mapping(priv->irq);
-out_release_dma:
- dma_release_channel(priv->chan);
-out_unmap_regs:
- iounmap(priv->regs);
-out_free_priv:
- kref_put(&priv->ref, fpga_device_release);
-out_return:
- return ret;
-}
-
-static int data_of_remove(struct platform_device *op)
-{
- struct fpga_device *priv = platform_get_drvdata(op);
- struct device *this_device = priv->miscdev.this_device;
-
- /* remove all sysfs files, now the device cannot be re-enabled */
- sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);
-
- /* remove all debugfs files */
- data_debugfs_exit(priv);
-
- /* disable the device from generating data */
- data_device_disable(priv);
-
- /* remove the character device to stop new readers from appearing */
- misc_deregister(&priv->miscdev);
-
- /* cleanup everything not needed by readers */
- irq_dispose_mapping(priv->irq);
- dma_release_channel(priv->chan);
- iounmap(priv->regs);
-
- /* release our reference */
- kref_put(&priv->ref, fpga_device_release);
- return 0;
-}
-
-static const struct of_device_id data_of_match[] = {
- { .compatible = "carma,carma-fpga", },
- {},
-};
-
-static struct platform_driver data_of_driver = {
- .probe = data_of_probe,
- .remove = data_of_remove,
- .driver = {
- .name = drv_name,
- .of_match_table = data_of_match,
- },
-};
-
-module_platform_driver(data_of_driver);
-
-MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
-MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
-MODULE_LICENSE("GPL");
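For completeness, the module_platform_driver() call above stands in for the usual registration boilerplate; it expands to roughly:

#include <linux/platform_device.h>

static int __init data_of_driver_init(void)
{
        return platform_driver_register(&data_of_driver);
}
module_init(data_of_driver_init);

static void __exit data_of_driver_exit(void)
{
        platform_driver_unregister(&data_of_driver);
}
module_exit(data_of_driver_exit);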
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d2cd53e3fac3..1e42781592d8 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -59,46 +59,29 @@ void mei_amthif_reset_params(struct mei_device *dev)
* mei_amthif_host_init - mei amthif client initialization.
*
* @dev: the device structure
+ * @me_cl: me client
*
* Return: 0 on success, <0 on failure.
*/
-int mei_amthif_host_init(struct mei_device *dev)
+int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
{
struct mei_cl *cl = &dev->iamthif_cl;
- struct mei_me_client *me_cl;
int ret;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_cl_init(cl, dev);
- me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
- if (!me_cl) {
- dev_info(dev->dev, "amthif: failed to find the client");
- return -ENOTTY;
- }
-
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
-
- /* Assign iamthif_mtu to the value received from ME */
-
- dev->iamthif_mtu = me_cl->props.max_msg_length;
- dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu);
-
-
ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
if (ret < 0) {
dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
- goto out;
+ return ret;
}
- ret = mei_cl_connect(cl, NULL);
+ ret = mei_cl_connect(cl, me_cl, NULL);
dev->iamthif_state = MEI_IAMTHIF_IDLE;
-out:
- mei_me_cl_put(me_cl);
return ret;
}
@@ -250,7 +233,6 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
- size_t length = dev->iamthif_mtu;
int rets;
cb = mei_io_cb_init(cl, MEI_FOP_READ, file);
@@ -259,7 +241,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
goto err;
}
- rets = mei_io_cb_alloc_buf(cb, length);
+ rets = mei_io_cb_alloc_buf(cb, mei_cl_mtu(cl));
if (rets)
goto err;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4cf38c39878a..357b6ae4d207 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -35,18 +35,30 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
struct mei_cl_device *device = to_mei_cl_device(dev);
struct mei_cl_driver *driver = to_mei_cl_driver(drv);
const struct mei_cl_device_id *id;
+ const uuid_le *uuid;
+ const char *name;
if (!device)
return 0;
+ uuid = mei_me_cl_uuid(device->me_cl);
+ name = device->name;
+
if (!driver || !driver->id_table)
return 0;
id = driver->id_table;
- while (id->name[0]) {
- if (!strncmp(dev_name(dev), id->name, sizeof(id->name)))
- return 1;
+ while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
+
+ if (!uuid_le_cmp(*uuid, id->uuid)) {
+ if (id->name[0]) {
+ if (!strncmp(name, id->name, sizeof(id->name)))
+ return 1;
+ } else {
+ return 1;
+ }
+ }
id++;
}
@@ -69,7 +81,7 @@ static int mei_cl_device_probe(struct device *dev)
dev_dbg(dev, "Device probe\n");
- strlcpy(id.name, dev_name(dev), sizeof(id.name));
+ strlcpy(id.name, device->name, sizeof(id.name));
return driver->probe(device, &id);
}
@@ -97,18 +109,48 @@ static int mei_cl_device_remove(struct device *dev)
return driver->remove(device);
}
+static ssize_t name_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ size_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "%s", device->name);
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
+ size_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(uuid);
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
char *buf)
{
- int len;
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
+ size_t len;
- len = snprintf(buf, PAGE_SIZE, "mei:%s\n", dev_name(dev));
+ len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
+ device->name, MEI_CL_UUID_ARGS(uuid->b));
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *mei_cl_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_uuid.attr,
&dev_attr_modalias.attr,
NULL,
};
@@ -116,7 +158,17 @@ ATTRIBUTE_GROUPS(mei_cl_dev);
static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- if (add_uevent_var(env, "MODALIAS=mei:%s", dev_name(dev)))
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
+
+ if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MEI_CL_NAME=%s", device->name))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
+ device->name, MEI_CL_UUID_ARGS(uuid->b)))
return -ENOMEM;
return 0;
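With a hypothetical client named mei_phd and an illustrative UUID, the environment strings emitted by this uevent callback take the form:

        MEI_CL_UUID=01234567-89ab-cdef-0123-456789abcdef
        MEI_CL_NAME=mei_phd
        MODALIAS=mei:mei_phd:01234567-89ab-cdef-0123-456789abcdef:

which lets module autoloading key on the UUID while the bus match above still honors name-qualified driver id tables.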
@@ -133,7 +185,13 @@ static struct bus_type mei_cl_bus_type = {
static void mei_cl_dev_release(struct device *dev)
{
- kfree(to_mei_cl_device(dev));
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+
+ if (!device)
+ return;
+
+ mei_me_cl_put(device->me_cl);
+ kfree(device);
}
static struct device_type mei_cl_device_type = {
@@ -141,45 +199,50 @@ static struct device_type mei_cl_device_type = {
};
struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
- uuid_le uuid)
+ uuid_le uuid)
{
struct mei_cl *cl;
list_for_each_entry(cl, &dev->device_list, device_link) {
- if (!uuid_le_cmp(uuid, cl->cl_uuid))
+ if (cl->device && cl->device->me_cl &&
+ !uuid_le_cmp(uuid, *mei_me_cl_uuid(cl->device->me_cl)))
return cl;
}
return NULL;
}
+
struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
- uuid_le uuid, char *name,
- struct mei_cl_ops *ops)
+ struct mei_me_client *me_cl,
+ struct mei_cl *cl,
+ char *name)
{
struct mei_cl_device *device;
- struct mei_cl *cl;
int status;
- cl = mei_cl_bus_find_cl_by_uuid(dev, uuid);
- if (cl == NULL)
- return NULL;
-
device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
if (!device)
return NULL;
- device->cl = cl;
- device->ops = ops;
+ device->me_cl = mei_me_cl_get(me_cl);
+ if (!device->me_cl) {
+ kfree(device);
+ return NULL;
+ }
+ device->cl = cl;
device->dev.parent = dev->dev;
device->dev.bus = &mei_cl_bus_type;
device->dev.type = &mei_cl_device_type;
- dev_set_name(&device->dev, "%s", name);
+ strlcpy(device->name, name, sizeof(device->name));
+
+ dev_set_name(&device->dev, "mei:%s:%pUl", name, mei_me_cl_uuid(me_cl));
status = device_register(&device->dev);
if (status) {
dev_err(dev->dev, "Failed to register MEI device\n");
+ mei_me_cl_put(device->me_cl);
kfree(device);
return NULL;
}
@@ -224,11 +287,10 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver)
}
EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
-static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking)
{
struct mei_device *dev;
- struct mei_me_client *me_cl = NULL;
struct mei_cl_cb *cb = NULL;
ssize_t rets;
@@ -244,13 +306,12 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
}
/* Check if we have an ME client device */
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
+ if (!mei_me_cl_is_active(cl->me_cl)) {
rets = -ENOTTY;
goto out;
}
- if (length > me_cl->props.max_msg_length) {
+ if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
}
@@ -266,7 +327,6 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
rets = mei_cl_write(cl, cb, blocking);
out:
- mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
if (rets < 0)
mei_io_cb_free(cb);
@@ -341,16 +401,6 @@ out:
return rets;
}
-inline ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length)
-{
- return ___mei_cl_send(cl, buf, length, 0);
-}
-
-inline ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length)
-{
- return ___mei_cl_send(cl, buf, length, 1);
-}
-
ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
{
struct mei_cl *cl = device->cl;
@@ -358,23 +408,17 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
if (cl == NULL)
return -ENODEV;
- if (device->ops && device->ops->send)
- return device->ops->send(device, buf, length);
-
- return __mei_cl_send(cl, buf, length);
+ return __mei_cl_send(cl, buf, length, 1);
}
EXPORT_SYMBOL_GPL(mei_cl_send);
ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
{
- struct mei_cl *cl = device->cl;
+ struct mei_cl *cl = device->cl;
if (cl == NULL)
return -ENODEV;
- if (device->ops && device->ops->recv)
- return device->ops->recv(device, buf, length);
-
return __mei_cl_recv(cl, buf, length);
}
EXPORT_SYMBOL_GPL(mei_cl_recv);
@@ -436,7 +480,13 @@ int mei_cl_enable_device(struct mei_cl_device *device)
mutex_lock(&dev->device_lock);
- err = mei_cl_connect(cl, NULL);
+ if (mei_cl_is_connected(cl)) {
+ mutex_unlock(&dev->device_lock);
+		dev_warn(dev->dev, "Already connected\n");
+ return -EBUSY;
+ }
+
+ err = mei_cl_connect(cl, device->me_cl, NULL);
if (err < 0) {
mutex_unlock(&dev->device_lock);
dev_err(dev->dev, "Could not connect to the ME client");
@@ -449,10 +499,7 @@ int mei_cl_enable_device(struct mei_cl_device *device)
if (device->event_cb)
mei_cl_read_start(device->cl, 0, NULL);
- if (!device->ops || !device->ops->enable)
- return 0;
-
- return device->ops->enable(device);
+ return 0;
}
EXPORT_SYMBOL_GPL(mei_cl_enable_device);
@@ -467,9 +514,6 @@ int mei_cl_disable_device(struct mei_cl_device *device)
dev = cl->dev;
- if (device->ops && device->ops->disable)
- device->ops->disable(device);
-
device->event_cb = NULL;
mutex_lock(&dev->device_lock);
@@ -480,8 +524,6 @@ int mei_cl_disable_device(struct mei_cl_device *device)
goto out;
}
- cl->state = MEI_FILE_DISCONNECTING;
-
err = mei_cl_disconnect(cl);
if (err < 0) {
dev_err(dev->dev, "Could not disconnect from the ME client");
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1e99ef6a54a2..6decbe136ea7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -83,7 +83,7 @@ void mei_me_cl_put(struct mei_me_client *me_cl)
}
/**
- * __mei_me_cl_del - delete me client form the list and decrease
+ * __mei_me_cl_del - delete me client from the list and decrease
* reference counter
*
* @dev: mei device
@@ -96,11 +96,25 @@ static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
if (!me_cl)
return;
- list_del(&me_cl->list);
+ list_del_init(&me_cl->list);
mei_me_cl_put(me_cl);
}
/**
+ * mei_me_cl_del - delete me client from the list and decrease
+ * reference counter
+ *
+ * @dev: mei device
+ * @me_cl: me client
+ */
+void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
+{
+ down_write(&dev->me_clients_rwsem);
+ __mei_me_cl_del(dev, me_cl);
+ up_write(&dev->me_clients_rwsem);
+}
+
+/**
* mei_me_cl_add - add me client to the list
*
* @dev: mei device
@@ -317,7 +331,7 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
{
return cl1 && cl2 &&
(cl1->host_client_id == cl2->host_client_id) &&
- (cl1->me_client_id == cl2->me_client_id);
+ (mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
}
/**
@@ -546,6 +560,7 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
INIT_LIST_HEAD(&cl->link);
INIT_LIST_HEAD(&cl->device_link);
cl->writing_state = MEI_IDLE;
+ cl->state = MEI_FILE_INITIALIZING;
cl->dev = dev;
}
@@ -619,7 +634,7 @@ int mei_cl_link(struct mei_cl *cl, int id)
}
/**
- * mei_cl_unlink - remove me_cl from the list
+ * mei_cl_unlink - remove host client from the list
*
* @cl: host client
*
@@ -667,17 +682,17 @@ void mei_host_client_init(struct work_struct *work)
me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (me_cl)
- mei_amthif_host_init(dev);
+ mei_amthif_host_init(dev, me_cl);
mei_me_cl_put(me_cl);
me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
if (me_cl)
- mei_wd_host_init(dev);
+ mei_wd_host_init(dev, me_cl);
mei_me_cl_put(me_cl);
me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
if (me_cl)
- mei_nfc_host_init(dev);
+ mei_nfc_host_init(dev, me_cl);
mei_me_cl_put(me_cl);
@@ -699,7 +714,7 @@ void mei_host_client_init(struct work_struct *work)
bool mei_hbuf_acquire(struct mei_device *dev)
{
if (mei_pg_state(dev) == MEI_PG_ON ||
- dev->pg_event == MEI_PG_EVENT_WAIT) {
+ mei_pg_in_transition(dev)) {
dev_dbg(dev->dev, "device is in pg\n");
return false;
}
@@ -715,6 +730,120 @@ bool mei_hbuf_acquire(struct mei_device *dev)
}
/**
+ * mei_cl_set_disconnected - set disconnected state and clear
+ * associated states and resources
+ *
+ * @cl: host client
+ */
+void mei_cl_set_disconnected(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+
+ if (cl->state == MEI_FILE_DISCONNECTED ||
+ cl->state == MEI_FILE_INITIALIZING)
+ return;
+
+ cl->state = MEI_FILE_DISCONNECTED;
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ cl->mei_flow_ctrl_creds = 0;
+ cl->timer_count = 0;
+
+ if (!cl->me_cl)
+ return;
+
+ if (!WARN_ON(cl->me_cl->connect_count == 0))
+ cl->me_cl->connect_count--;
+
+ if (cl->me_cl->connect_count == 0)
+ cl->me_cl->mei_flow_ctrl_creds = 0;
+
+ mei_me_cl_put(cl->me_cl);
+ cl->me_cl = NULL;
+}
+
+static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
+{
+ if (!mei_me_cl_get(me_cl))
+ return -ENOENT;
+
+ /* only one connection is allowed for fixed address clients */
+ if (me_cl->props.fixed_address) {
+ if (me_cl->connect_count) {
+ mei_me_cl_put(me_cl);
+ return -EBUSY;
+ }
+ }
+
+ cl->me_cl = me_cl;
+ cl->state = MEI_FILE_CONNECTING;
+ cl->me_cl->connect_count++;
+
+ return 0;
+}
+
+/**
+ * mei_cl_send_disconnect - send disconnect request
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+ struct mei_device *dev;
+ int ret;
+
+ dev = cl->dev;
+
+ ret = mei_hbm_cl_disconnect_req(dev, cl);
+ cl->status = ret;
+ if (ret) {
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+
+ return 0;
+}
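Both the disconnect path here and the connect path below share one request/response shape: issue the request with the device mutex held, drop the mutex while sleeping on the client wait queue so the interrupt thread can deliver the reply, then re-take it and judge the outcome from the resulting state. A generic, self-contained sketch of that shape (all names hypothetical):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct xfer {
        struct mutex lock;              /* protects state/status */
        wait_queue_head_t wait;         /* woken by the irq thread */
        int state;
        int status;
};

#define XFER_REPLY 1

/* called with x->lock held; returns the reply status or -ETIME */
static int xfer_wait_reply(struct xfer *x, unsigned long timeout)
{
        mutex_unlock(&x->lock);
        wait_event_timeout(x->wait, x->state == XFER_REPLY, timeout);
        mutex_lock(&x->lock);

        return x->state == XFER_REPLY ? x->status : -ETIME;
}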
+
+/**
+ * mei_cl_irq_disconnect - processes close related operation from
+ * interrupt thread context - send disconnect request
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
+
+ if (slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_cl_send_disconnect(cl, cb);
+ if (ret)
+ list_move_tail(&cb->list, &cmpl_list->list);
+
+ return ret;
+}
+
+/**
* mei_cl_disconnect - disconnect host client from the me one
*
* @cl: host client
@@ -736,8 +865,13 @@ int mei_cl_disconnect(struct mei_cl *cl)
cl_dbg(dev, cl, "disconnecting");
- if (cl->state != MEI_FILE_DISCONNECTING)
+ if (!mei_cl_is_connected(cl))
+ return 0;
+
+ if (mei_cl_is_fixed_address(cl)) {
+ mei_cl_set_disconnected(cl);
return 0;
+ }
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
@@ -746,44 +880,41 @@ int mei_cl_disconnect(struct mei_cl *cl)
return rets;
}
+ cl->state = MEI_FILE_DISCONNECTING;
+
cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
rets = cb ? 0 : -ENOMEM;
if (rets)
- goto free;
+ goto out;
+
+ cl_dbg(dev, cl, "add disconnect cb to control write list\n");
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
if (mei_hbuf_acquire(dev)) {
- if (mei_hbm_cl_disconnect_req(dev, cl)) {
- rets = -ENODEV;
+ rets = mei_cl_send_disconnect(cl, cb);
+ if (rets) {
cl_err(dev, cl, "failed to disconnect.\n");
- goto free;
+ goto out;
}
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- cl_dbg(dev, cl, "add disconnect cb to control write list\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
}
- mutex_unlock(&dev->device_lock);
-
- wait_event_timeout(cl->wait,
- MEI_FILE_DISCONNECTED == cl->state,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
- if (MEI_FILE_DISCONNECTED == cl->state) {
- rets = 0;
- cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
- } else {
+ rets = cl->status;
+ if (cl->state != MEI_FILE_DISCONNECT_REPLY) {
cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
rets = -ETIME;
}
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
-free:
+out:
+	/* we also disconnect on error */
+ mei_cl_set_disconnected(cl);
+ if (!rets)
+ cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
+
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -801,53 +932,119 @@ free:
*
* Return: true if other client is connected, false - otherwise.
*/
-bool mei_cl_is_other_connecting(struct mei_cl *cl)
+static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
struct mei_device *dev;
- struct mei_cl *ocl; /* the other client */
-
- if (WARN_ON(!cl || !cl->dev))
- return false;
+ struct mei_cl_cb *cb;
dev = cl->dev;
- list_for_each_entry(ocl, &dev->file_list, link) {
- if (ocl->state == MEI_FILE_CONNECTING &&
- ocl != cl &&
- cl->me_client_id == ocl->me_client_id)
+ list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
+ if (cb->fop_type == MEI_FOP_CONNECT &&
+ mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
return true;
-
}
return false;
}
/**
+ * mei_cl_send_connect - send connect request
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+ struct mei_device *dev;
+ int ret;
+
+ dev = cl->dev;
+
+ ret = mei_hbm_cl_connect_req(dev, cl);
+ cl->status = ret;
+ if (ret) {
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+ return 0;
+}
+
+/**
+ * mei_cl_irq_connect - send connect request in irq_thread context
+ *
+ * @cl: host client
+ * @cb: callback block
+ * @cmpl_list: complete list
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int rets;
+
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
+
+ if (mei_cl_is_other_connecting(cl))
+ return 0;
+
+ if (slots < msg_slots)
+ return -EMSGSIZE;
+
+ rets = mei_cl_send_connect(cl, cb);
+ if (rets)
+ list_move_tail(&cb->list, &cmpl_list->list);
+
+ return rets;
+}
+
+/**
* mei_cl_connect - connect host client to the me one
*
* @cl: host client
+ * @me_cl: me client
* @file: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
-int mei_cl_connect(struct mei_cl *cl, struct file *file)
+int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
+ struct file *file)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
- if (WARN_ON(!cl || !cl->dev))
+ if (WARN_ON(!cl || !cl->dev || !me_cl))
return -ENODEV;
dev = cl->dev;
+ rets = mei_cl_set_connecting(cl, me_cl);
+ if (rets)
+ return rets;
+
+ if (mei_cl_is_fixed_address(cl)) {
+ cl->state = MEI_FILE_CONNECTED;
+ return 0;
+ }
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
- return rets;
+ goto nortpm;
}
cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
@@ -855,45 +1052,40 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
if (rets)
goto out;
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
- cl->state = MEI_FILE_CONNECTING;
- if (mei_hbm_cl_connect_req(dev, cl)) {
- rets = -ENODEV;
+ rets = mei_cl_send_connect(cl, cb);
+ if (rets)
goto out;
- }
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- cl->state = MEI_FILE_INITIALIZING;
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
(cl->state == MEI_FILE_CONNECTED ||
- cl->state == MEI_FILE_DISCONNECTED),
+ cl->state == MEI_FILE_DISCONNECT_REPLY),
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
- cl->state = MEI_FILE_DISCONNECTED;
- /* something went really wrong */
+ /* timeout or something went really wrong */
if (!cl->status)
cl->status = -EFAULT;
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
}
rets = cl->status;
-
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
+
+nortpm:
+ if (!mei_cl_is_connected(cl))
+ mei_cl_set_disconnected(cl);
+
return rets;
}
@@ -934,36 +1126,29 @@ err:
* @cl: private data of the file object
*
* Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
- * -ENOENT if mei_cl is not present
- * -EINVAL if single_recv_buf == 0
*/
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
- struct mei_device *dev;
- struct mei_me_client *me_cl;
- int rets = 0;
+ int rets;
- if (WARN_ON(!cl || !cl->dev))
+ if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
- dev = cl->dev;
-
if (cl->mei_flow_ctrl_creds > 0)
return 1;
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
- return -ENOENT;
+ if (mei_cl_is_fixed_address(cl)) {
+ rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
+ if (rets && rets != -EBUSY)
+ return rets;
+ return 1;
}
- if (me_cl->mei_flow_ctrl_creds > 0) {
- rets = 1;
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- rets = -EINVAL;
+ if (mei_cl_is_single_recv_buf(cl)) {
+ if (cl->me_cl->mei_flow_ctrl_creds > 0)
+ return 1;
}
- mei_me_cl_put(me_cl);
- return rets;
+ return 0;
}
/**
@@ -973,43 +1158,26 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
*
* Return:
* 0 on success
- * -ENOENT when me client is not found
* -EINVAL when ctrl credits are <= 0
*/
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
- struct mei_device *dev;
- struct mei_me_client *me_cl;
- int rets;
-
- if (WARN_ON(!cl || !cl->dev))
+ if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
- dev = cl->dev;
-
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
- return -ENOENT;
- }
+ if (mei_cl_is_fixed_address(cl))
+ return 0;
- if (me_cl->props.single_recv_buf) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) {
- rets = -EINVAL;
- goto out;
- }
- me_cl->mei_flow_ctrl_creds--;
+ if (mei_cl_is_single_recv_buf(cl)) {
+ if (WARN_ON(cl->me_cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ cl->me_cl->mei_flow_ctrl_creds--;
} else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) {
- rets = -EINVAL;
- goto out;
- }
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
cl->mei_flow_ctrl_creds--;
}
- rets = 0;
-out:
- mei_me_cl_put(me_cl);
- return rets;
+ return 0;
}
/**
@@ -1025,7 +1193,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
- struct mei_me_client *me_cl;
int rets;
if (WARN_ON(!cl || !cl->dev))
@@ -1040,27 +1207,29 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
if (!list_empty(&cl->rd_pending))
return -EBUSY;
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+ if (!mei_me_cl_is_active(cl->me_cl)) {
+ cl_err(dev, cl, "no such me client\n");
return -ENOTTY;
}
+
/* always allocate at least client max message */
- length = max_t(size_t, length, me_cl->props.max_msg_length);
- mei_me_cl_put(me_cl);
+ length = max_t(size_t, length, mei_cl_mtu(cl));
+ cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
+ if (!cb)
+ return -ENOMEM;
+
+ if (mei_cl_is_fixed_address(cl)) {
+ list_add_tail(&cb->list, &cl->rd_pending);
+ return 0;
+ }
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
- return rets;
+ goto nortpm;
}
- cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
- rets = cb ? 0 : -ENOMEM;
- if (rets)
- goto out;
-
if (mei_hbuf_acquire(dev)) {
rets = mei_hbm_cl_flow_control_req(dev, cl);
if (rets < 0)
@@ -1068,6 +1237,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
list_add_tail(&cb->list, &cl->rd_pending);
} else {
+ rets = 0;
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
@@ -1075,7 +1245,7 @@ out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
-
+nortpm:
if (rets)
mei_io_cb_free(cb);
@@ -1102,6 +1272,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
u32 msg_slots;
int slots;
int rets;
+ bool first_chunk;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -1110,7 +1281,9 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
buf = &cb->buf;
- rets = mei_cl_flow_ctrl_creds(cl);
+ first_chunk = cb->buf_idx == 0;
+
+ rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
if (rets < 0)
return rets;
@@ -1123,8 +1296,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
len = buf->size - cb->buf_idx;
msg_slots = mei_data2slots(len);
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.host_addr = mei_cl_host_addr(cl);
+ mei_hdr.me_addr = mei_cl_me_id(cl);
mei_hdr.reserved = 0;
mei_hdr.internal = cb->internal;
@@ -1157,12 +1330,14 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cb->buf_idx += mei_hdr.length;
cb->completed = mei_hdr.msg_complete == 1;
- if (mei_hdr.msg_complete) {
+ if (first_chunk) {
if (mei_cl_flow_ctrl_reduce(cl))
return -EIO;
- list_move_tail(&cb->list, &dev->write_waiting_list.list);
}
+ if (mei_hdr.msg_complete)
+ list_move_tail(&cb->list, &dev->write_waiting_list.list);
+
return 0;
}
@@ -1207,8 +1382,8 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cb->buf_idx = 0;
cl->writing_state = MEI_IDLE;
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.host_addr = mei_cl_host_addr(cl);
+ mei_hdr.me_addr = mei_cl_me_id(cl);
mei_hdr.reserved = 0;
mei_hdr.msg_complete = 0;
mei_hdr.internal = cb->internal;
@@ -1241,21 +1416,19 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
if (rets)
goto err;
+ rets = mei_cl_flow_ctrl_reduce(cl);
+ if (rets)
+ goto err;
+
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
cb->completed = mei_hdr.msg_complete == 1;
out:
- if (mei_hdr.msg_complete) {
- rets = mei_cl_flow_ctrl_reduce(cl);
- if (rets < 0)
- goto err;
-
+ if (mei_hdr.msg_complete)
list_add_tail(&cb->list, &dev->write_waiting_list.list);
- } else {
+ else
list_add_tail(&cb->list, &dev->write_list.list);
- }
-
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
@@ -1289,20 +1462,36 @@ err:
*/
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
- if (cb->fop_type == MEI_FOP_WRITE) {
+ struct mei_device *dev = cl->dev;
+
+ switch (cb->fop_type) {
+ case MEI_FOP_WRITE:
mei_io_cb_free(cb);
- cb = NULL;
cl->writing_state = MEI_WRITE_COMPLETE;
- if (waitqueue_active(&cl->tx_wait))
+ if (waitqueue_active(&cl->tx_wait)) {
wake_up_interruptible(&cl->tx_wait);
+ } else {
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_request_autosuspend(dev->dev);
+ }
+ break;
- } else if (cb->fop_type == MEI_FOP_READ) {
+ case MEI_FOP_READ:
list_add_tail(&cb->list, &cl->rd_completed);
if (waitqueue_active(&cl->rx_wait))
wake_up_interruptible_all(&cl->rx_wait);
else
mei_cl_bus_rx_event(cl);
+ break;
+
+ case MEI_FOP_CONNECT:
+ case MEI_FOP_DISCONNECT:
+ if (waitqueue_active(&cl->wait))
+ wake_up(&cl->wait);
+ break;
+ default:
+ BUG_ON(0);
}
}
@@ -1312,16 +1501,12 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
*
* @dev: mei device
*/
-
void mei_cl_all_disconnect(struct mei_device *dev)
{
struct mei_cl *cl;
- list_for_each_entry(cl, &dev->file_list, link) {
- cl->state = MEI_FILE_DISCONNECTED;
- cl->mei_flow_ctrl_creds = 0;
- cl->timer_count = 0;
- }
+ list_for_each_entry(cl, &dev->file_list, link)
+ mei_cl_set_disconnected(cl);
}
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 0a39e5d45171..8d7f057f1045 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -44,6 +44,30 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 id);
void mei_me_cl_rm_all(struct mei_device *dev);
+/**
+ * mei_me_cl_is_active - check whether me client is active in the fw
+ *
+ * @me_cl: me client
+ *
+ * Return: true if the me client is active in the firmware
+ */
+static inline bool mei_me_cl_is_active(const struct mei_me_client *me_cl)
+{
+ return !list_empty_careful(&me_cl->list);
+}
+
+/**
+ * mei_me_cl_uuid - return me client protocol name (uuid)
+ *
+ * @me_cl: me client
+ *
+ * Return: me client protocol name
+ */
+static inline const uuid_le *mei_me_cl_uuid(const struct mei_me_client *me_cl)
+{
+ return &me_cl->props.protocol_name;
+}
+
/*
* MEI IO Functions
*/
@@ -94,18 +118,96 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/**
* mei_cl_is_connected - host client is connected
*
- * @cl: host clinet
+ * @cl: host client
*
- * Return: true if the host clinet is connected
+ * Return: true if the host client is connected
*/
static inline bool mei_cl_is_connected(struct mei_cl *cl)
{
return cl->state == MEI_FILE_CONNECTED;
}
-bool mei_cl_is_other_connecting(struct mei_cl *cl);
+/**
+ * mei_cl_me_id - me client id
+ *
+ * @cl: host client
+ *
+ * Return: me client id or 0 if client is not connected
+ */
+static inline u8 mei_cl_me_id(const struct mei_cl *cl)
+{
+ return cl->me_cl ? cl->me_cl->client_id : 0;
+}
+
+/**
+ * mei_cl_mtu - maximal message size that client can send and receive
+ *
+ * @cl: host client
+ *
+ * Return: mtu
+ */
+static inline size_t mei_cl_mtu(const struct mei_cl *cl)
+{
+ return cl->me_cl->props.max_msg_length;
+}
+
+/**
+ * mei_cl_is_fixed_address - check whether the me client uses fixed address
+ *
+ * @cl: host client
+ *
+ * Return: true if the client is connected and it has fixed me address
+ */
+static inline bool mei_cl_is_fixed_address(const struct mei_cl *cl)
+{
+ return cl->me_cl && cl->me_cl->props.fixed_address;
+}
+
+/**
+ * mei_cl_is_single_recv_buf - check whether the me client
+ * uses single receiving buffer
+ *
+ * @cl: host client
+ *
+ * Return: true if single_recv_buf == 1; false otherwise
+ */
+static inline bool mei_cl_is_single_recv_buf(const struct mei_cl *cl)
+{
+ return cl->me_cl->props.single_recv_buf;
+}
+
+/**
+ * mei_cl_uuid - client's uuid
+ *
+ * @cl: host client
+ *
+ * Return: uuid of the connected me client
+ */
+static inline const uuid_le *mei_cl_uuid(const struct mei_cl *cl)
+{
+ return mei_me_cl_uuid(cl->me_cl);
+}
+
+/**
+ * mei_cl_host_addr - client's host address
+ *
+ * @cl: host client
+ *
+ * Return: 0 for fixed address client, host address for dynamic client
+ */
+static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
+{
+ return mei_cl_is_fixed_address(cl) ? 0 : cl->host_client_id;
+}
+
int mei_cl_disconnect(struct mei_cl *cl);
-int mei_cl_connect(struct mei_cl *cl, struct file *file);
+void mei_cl_set_disconnected(struct mei_cl *cl);
+int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
+int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
+ struct file *file);
+int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
struct mei_cl_cb *cmpl_list);
@@ -117,14 +219,12 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
void mei_host_client_init(struct work_struct *work);
-
-
void mei_cl_all_disconnect(struct mei_device *dev);
void mei_cl_all_wakeup(struct mei_device *dev);
void mei_cl_all_write_clear(struct mei_device *dev);
#define MEI_CL_FMT "cl:host=%02d me=%02d "
-#define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id
+#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
#define cl_dbg(dev, cl, format, arg...) \
dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index d9cd7e6ee484..eb868341247f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -116,7 +116,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
pos += scnprintf(buf + pos, bufsz - pos,
"%2d|%2d|%4d|%5d|%2d|%2d|\n",
- i, cl->me_client_id, cl->host_client_id, cl->state,
+ i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
!list_empty(&cl->rd_completed), cl->writing_state);
i++;
}
@@ -149,6 +149,13 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
mei_dev_state_str(dev->dev_state));
pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
mei_hbm_state_str(dev->hbm_state));
+
+ if (dev->hbm_state == MEI_HBM_STARTED) {
+ pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
+ dev->hbm_f_pg_supported);
+ }
+
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
mei_pg_state_str(mei_pg_state(dev)));
@@ -209,6 +216,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev_err(dev->dev, "devstate: registration failed\n");
goto err;
}
+ f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
+ &dev->allow_fixed_address);
+ if (!f) {
+ dev_err(dev->dev, "allow_fixed_address: registration failed\n");
+ goto err;
+ }
dev->dbgfs_dir = dir;
return 0;
err:
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 58da92565c5e..a4f283165a33 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -150,8 +150,8 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
memset(cmd, 0, len);
cmd->hbm_cmd = hbm_cmd;
- cmd->host_addr = cl->host_client_id;
- cmd->me_addr = cl->me_client_id;
+ cmd->host_addr = mei_cl_host_addr(cl);
+ cmd->me_addr = mei_cl_me_id(cl);
}
/**
@@ -188,8 +188,8 @@ int mei_hbm_cl_write(struct mei_device *dev,
static inline
bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd)
{
- return cl->host_client_id == cmd->host_addr &&
- cl->me_client_id == cmd->me_addr;
+ return mei_cl_host_addr(cl) == cmd->host_addr &&
+ mei_cl_me_id(cl) == cmd->me_addr;
}
/**
@@ -572,7 +572,7 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status);
if (rs->status == MEI_CL_DISCONN_SUCCESS)
- cl->state = MEI_FILE_DISCONNECTED;
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
cl->status = 0;
}
@@ -611,7 +611,7 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
if (rs->status == MEI_CL_CONN_SUCCESS)
cl->state = MEI_FILE_CONNECTED;
else
- cl->state = MEI_FILE_DISCONNECTED;
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
cl->status = mei_cl_conn_status_to_errno(rs->status);
}
@@ -680,8 +680,8 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
if (cl) {
- cl_dbg(dev, cl, "disconnect request received\n");
- cl->state = MEI_FILE_DISCONNECTED;
+ cl_dbg(dev, cl, "fw disconnect request received\n");
+ cl->state = MEI_FILE_DISCONNECTING;
cl->timer_count = 0;
cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6fb75e62a764..43d7101ff993 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -663,11 +663,27 @@ int mei_me_pg_exit_sync(struct mei_device *dev)
mutex_lock(&dev->device_lock);
reply:
- if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
- ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
+ ret = -ETIME;
+ goto out;
+ }
+
+ dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
+ ret = 0;
else
ret = -ETIME;
+out:
dev->pg_event = MEI_PG_EVENT_IDLE;
hw->pg_state = MEI_PG_OFF;
@@ -675,6 +691,19 @@ reply:
}
/**
+ * mei_me_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_me_pg_in_transition(struct mei_device *dev)
+{
+ return dev->pg_event >= MEI_PG_EVENT_WAIT &&
+ dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+/**
* mei_me_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
@@ -705,6 +734,24 @@ notsupported:
}
/**
+ * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_intr(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
+ return;
+
+ dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
+ hw->pg_state = MEI_PG_OFF;
+ if (waitqueue_active(&dev->wait_pg))
+ wake_up(&dev->wait_pg);
+}
+
+/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
@@ -761,6 +808,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
goto end;
}
+ mei_me_pg_intr(dev);
+
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
@@ -797,9 +846,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/*
* During PG handshake the only allowed write is the reply to the
* PG exit message, so block calls to the write function
- * if the pg state is not idle
+ * if the pg event is in PG handshake
*/
- if (dev->pg_event == MEI_PG_EVENT_IDLE) {
+ if (dev->pg_event != MEI_PG_EVENT_WAIT &&
+ dev->pg_event != MEI_PG_EVENT_RECEIVED) {
rets = mei_irq_write_handler(dev, &complete_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
@@ -824,6 +874,7 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.hw_config = mei_me_hw_config,
.hw_start = mei_me_hw_start,
+ .pg_in_transition = mei_me_pg_in_transition,
.pg_is_enabled = mei_me_pg_is_enabled,
.intr_clear = mei_me_intr_clear,
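
The new pg_in_transition hook gives common code a single predicate covering the whole handshake window (MEI_PG_EVENT_WAIT through MEI_PG_EVENT_INTR_WAIT), so writers can back off while the ISR-driven exit above completes. A minimal sketch of such a consumer, modeled on a mei_hbuf_acquire()-style gate; treat it as an illustration rather than the exact upstream body:

/* Sketch: refuse to hand out the host (tx) buffer while the device
 * is power gated or a PG transition is still in flight.
 */
static bool mei_hbuf_acquire(struct mei_device *dev)
{
        if (mei_pg_state(dev) == MEI_PG_ON || mei_pg_in_transition(dev)) {
                dev_dbg(dev->dev, "device is in pg\n");
                return false;
        }

        if (!dev->hbuf_is_ready) {
                dev_dbg(dev->dev, "hbuf is not ready\n");
                return false;
        }

        dev->hbuf_is_ready = false;
        return true;
}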
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 7abafe7d120d..bae680c648ff 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/irqreturn.h>
@@ -218,26 +219,25 @@ static u32 mei_txe_aliveness_get(struct mei_device *dev)
*
* Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
*
- * Return: > 0 if the expected value was received, -ETIME otherwise
+ * Return: 0 if the expected value was received, -ETIME otherwise
*/
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
- int t = 0;
+ ktime_t stop, start;
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
do {
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev,
- "aliveness settled after %d msecs\n", t);
- return t;
+ dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return 0;
}
- mutex_unlock(&dev->device_lock);
- msleep(MSEC_PER_SEC / 5);
- mutex_lock(&dev->device_lock);
- t += MSEC_PER_SEC / 5;
- } while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+ usleep_range(20, 50);
+ } while (ktime_before(ktime_get(), stop));
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_err(dev->dev, "aliveness timed out\n");
@@ -302,6 +302,18 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
}
/**
+ * mei_txe_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_txe_pg_in_transition(struct mei_device *dev)
+{
+ return dev->pg_event == MEI_PG_EVENT_WAIT;
+}
+
+/**
* mei_txe_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
@@ -1138,6 +1150,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
.hw_config = mei_txe_hw_config,
.hw_start = mei_txe_hw_start,
+ .pg_in_transition = mei_txe_pg_in_transition,
.pg_is_enabled = mei_txe_pg_is_enabled,
.intr_clear = mei_txe_intr_clear,
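
The aliveness-poll rewrite above trades a coarse msleep() loop counting elapsed 200 ms slices for an absolute ktime deadline with a short usleep_range(), which both tightens latency and keeps the timeout exact. The same idiom in isolation, as a generic sketch:

/* Generic deadline-poll sketch using the ktime idiom adopted above:
 * compute the stop time once, then poll briefly until the condition
 * holds or the deadline passes.
 */
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int poll_until(bool (*cond)(void *data), void *data,
                      unsigned int timeout_ms)
{
        ktime_t stop = ktime_add(ktime_get(), ms_to_ktime(timeout_ms));

        do {
                if (cond(data))
                        return 0;
                usleep_range(20, 50);
        } while (ktime_before(ktime_get(), stop));

        return -ETIME;
}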
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 97353cf8d9b6..94514b2c7a50 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -361,13 +361,15 @@ bool mei_write_is_idle(struct mei_device *dev)
{
bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
list_empty(&dev->ctrl_wr_list.list) &&
- list_empty(&dev->write_list.list));
+ list_empty(&dev->write_list.list) &&
+ list_empty(&dev->write_waiting_list.list));
- dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n",
+ dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
idle,
mei_dev_state_str(dev->dev_state),
list_empty(&dev->ctrl_wr_list.list),
- list_empty(&dev->write_list.list));
+ list_empty(&dev->write_list.list),
+ list_empty(&dev->write_waiting_list.list));
return idle;
}
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 3f84d2edcde4..3f3405269c39 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -65,8 +65,8 @@ EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr)
{
- return cl->host_client_id == mei_hdr->host_addr &&
- cl->me_client_id == mei_hdr->me_addr;
+ return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
+ mei_cl_me_id(cl) == mei_hdr->me_addr;
}
/**
@@ -180,56 +180,14 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
return -EMSGSIZE;
ret = mei_hbm_cl_disconnect_rsp(dev, cl);
-
- cl->state = MEI_FILE_DISCONNECTED;
- cl->status = 0;
+ mei_cl_set_disconnected(cl);
mei_io_cb_free(cb);
+ mei_me_cl_put(cl->me_cl);
+ cl->me_cl = NULL;
return ret;
}
-
-
-/**
- * mei_cl_irq_disconnect - processes close related operation from
- * interrupt thread context - send disconnect request
- *
- * @cl: client
- * @cb: callback block.
- * @cmpl_list: complete list.
- *
- * Return: 0, OK; otherwise, error.
- */
-static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
-{
- struct mei_device *dev = cl->dev;
- u32 msg_slots;
- int slots;
-
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
- slots = mei_hbuf_empty_slots(dev);
-
- if (slots < msg_slots)
- return -EMSGSIZE;
-
- if (mei_hbm_cl_disconnect_req(dev, cl)) {
- cl->status = 0;
- cb->buf_idx = 0;
- list_move_tail(&cb->list, &cmpl_list->list);
- return -EIO;
- }
-
- cl->state = MEI_FILE_DISCONNECTING;
- cl->status = 0;
- cb->buf_idx = 0;
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
-
- return 0;
-}
-
-
/**
* mei_cl_irq_read - processes client read related operation from the
* interrupt thread context - request for flow control credits
@@ -267,49 +225,6 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
-
-/**
- * mei_cl_irq_connect - send connect request in irq_thread context
- *
- * @cl: client
- * @cb: callback block.
- * @cmpl_list: complete list.
- *
- * Return: 0, OK; otherwise, error.
- */
-static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
-{
- struct mei_device *dev = cl->dev;
- u32 msg_slots;
- int slots;
- int ret;
-
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
- slots = mei_hbuf_empty_slots(dev);
-
- if (mei_cl_is_other_connecting(cl))
- return 0;
-
- if (slots < msg_slots)
- return -EMSGSIZE;
-
- cl->state = MEI_FILE_CONNECTING;
-
- ret = mei_hbm_cl_connect_req(dev, cl);
- if (ret) {
- cl->status = ret;
- cb->buf_idx = 0;
- list_del_init(&cb->list);
- return ret;
- }
-
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- return 0;
-}
-
-
/**
* mei_irq_read_handler - bottom half read routine after ISR to
* handle the read processing.
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 3e2968159506..8eb0a9500a90 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -94,7 +94,7 @@ static int mei_release(struct inode *inode, struct file *file)
{
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
- int rets = 0;
+ int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -106,11 +106,8 @@ static int mei_release(struct inode *inode, struct file *file)
rets = mei_amthif_release(dev, file);
goto out;
}
- if (mei_cl_is_connected(cl)) {
- cl->state = MEI_FILE_DISCONNECTING;
- cl_dbg(dev, cl, "disconnecting\n");
- rets = mei_cl_disconnect(cl);
- }
+ rets = mei_cl_disconnect(cl);
+
mei_cl_flush_queues(cl, file);
cl_dbg(dev, cl, "removing\n");
@@ -186,8 +183,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
err = mei_cl_read_start(cl, length, file);
if (err && err != -EBUSY) {
- dev_dbg(dev->dev,
- "mei start read failure with status = %d\n", err);
+ cl_dbg(dev, cl, "mei start read failure status = %d\n", err);
rets = err;
goto out;
}
@@ -218,6 +214,11 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
cb = mei_cl_read_cb(cl, file);
if (!cb) {
+ if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) {
+ cb = mei_cl_read_cb(cl, NULL);
+ if (cb)
+ goto copy_buffer;
+ }
rets = 0;
goto out;
}
@@ -226,11 +227,11 @@ copy_buffer:
/* now copy the data to user space */
if (cb->status) {
rets = cb->status;
- dev_dbg(dev->dev, "read operation failed %d\n", rets);
+ cl_dbg(dev, cl, "read operation failed %d\n", rets);
goto free;
}
- dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n",
+ cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n",
cb->buf.size, cb->buf_idx);
if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
rets = -EMSGSIZE;
@@ -256,7 +257,7 @@ free:
mei_io_cb_free(cb);
out:
- dev_dbg(dev->dev, "end mei read rets= %d\n", rets);
+ cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -274,7 +275,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
- struct mei_me_client *me_cl = NULL;
struct mei_cl_cb *write_cb = NULL;
struct mei_device *dev;
unsigned long timeout = 0;
@@ -292,27 +292,27 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- rets = -ENOTTY;
+ if (!mei_cl_is_connected(cl)) {
+ cl_err(dev, cl, "is not connected");
+ rets = -ENODEV;
goto out;
}
- if (length == 0) {
- rets = 0;
+ if (!mei_me_cl_is_active(cl->me_cl)) {
+ rets = -ENOTTY;
goto out;
}
- if (length > me_cl->props.max_msg_length) {
+ if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
}
- if (!mei_cl_is_connected(cl)) {
- cl_err(dev, cl, "is not connected");
- rets = -ENODEV;
+ if (length == 0) {
+ rets = 0;
goto out;
}
+
if (cl == &dev->iamthif_cl) {
write_cb = mei_amthif_find_read_list_entry(dev, file);
@@ -350,14 +350,12 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
"amthif write failed with status = %d\n", rets);
goto out;
}
- mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
return length;
}
rets = mei_cl_write(cl, write_cb, false);
out:
- mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
if (rets < 0)
mei_io_cb_free(write_cb);
@@ -395,17 +393,16 @@ static int mei_ioctl_connect_client(struct file *file,
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (!me_cl || me_cl->props.fixed_address) {
+ if (!me_cl ||
+ (me_cl->props.fixed_address && !dev->allow_fixed_address)) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
- &data->in_client_uuid);
+ &data->in_client_uuid);
+ mei_me_cl_put(me_cl);
return -ENOTTY;
}
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
-
dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
- cl->me_client_id);
+ me_cl->client_id);
dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
me_cl->props.protocol_version);
dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
@@ -441,7 +438,7 @@ static int mei_ioctl_connect_client(struct file *file,
client->protocol_version = me_cl->props.protocol_version;
dev_dbg(dev->dev, "Can connect?\n");
- rets = mei_cl_connect(cl, file);
+ rets = mei_cl_connect(cl, me_cl, file);
end:
mei_me_cl_put(me_cl);
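
mei_read() and mei_write() above rely on two more client helpers: mei_cl_is_fixed_address() to spot fixed address clients and mei_cl_mtu() to bound message length via the connected ME client's properties. Hedged sketches consistent with how the hunks use them:

/* Sketches only; the real definitions are in drivers/misc/mei/client.h. */
static inline bool mei_cl_is_fixed_address(const struct mei_cl *cl)
{
        return cl->me_cl && cl->me_cl->props.fixed_address;
}

static inline size_t mei_cl_mtu(const struct mei_cl *cl)
{
        return cl->me_cl->props.max_msg_length;
}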
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f066ecd71939..453f6a333b42 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -88,7 +88,8 @@ enum file_state {
MEI_FILE_CONNECTING,
MEI_FILE_CONNECTED,
MEI_FILE_DISCONNECTING,
- MEI_FILE_DISCONNECTED
+ MEI_FILE_DISCONNECT_REPLY,
+ MEI_FILE_DISCONNECTED,
};
/* MEI device states */
@@ -176,6 +177,8 @@ struct mei_fw_status {
* @props: client properties
* @client_id: me client id
* @mei_flow_ctrl_creds: flow control credits
+ * @connect_count: number of connections to this client
+ * @reserved: reserved
*/
struct mei_me_client {
struct list_head list;
@@ -183,6 +186,8 @@ struct mei_me_client {
struct mei_client_properties props;
u8 client_id;
u8 mei_flow_ctrl_creds;
+ u8 connect_count;
+ u8 reserved;
};
@@ -226,11 +231,11 @@ struct mei_cl_cb {
* @rx_wait: wait queue for rx completion
* @wait: wait queue for management operation
* @status: connection status
- * @cl_uuid: client uuid name
+ * @me_cl: fw client connected
* @host_client_id: host id
- * @me_client_id: me/fw id
* @mei_flow_ctrl_creds: transmit flow credentials
* @timer_count: watchdog timer for operation completion
+ * @reserved: reserved for alignment
* @writing_state: state of the tx
* @rd_pending: pending read credits
* @rd_completed: completed read
@@ -246,11 +251,11 @@ struct mei_cl {
wait_queue_head_t rx_wait;
wait_queue_head_t wait;
int status;
- uuid_le cl_uuid;
+ struct mei_me_client *me_cl;
u8 host_client_id;
- u8 me_client_id;
u8 mei_flow_ctrl_creds;
u8 timer_count;
+ u8 reserved;
enum mei_file_transaction_states writing_state;
struct list_head rd_pending;
struct list_head rd_completed;
@@ -271,6 +276,7 @@ struct mei_cl {
* @fw_status : get fw status registers
* @pg_state : power gating state of the device
+ * @pg_in_transition : is device now in pg transition
* @pg_is_enabled : is power gating enabled
* @intr_clear : clear pending interrupts
@@ -300,6 +306,7 @@ struct mei_hw_ops {
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
enum mei_pg_state (*pg_state)(struct mei_device *dev);
+ bool (*pg_in_transition)(struct mei_device *dev);
bool (*pg_is_enabled)(struct mei_device *dev);
void (*intr_clear)(struct mei_device *dev);
@@ -323,34 +330,14 @@ struct mei_hw_ops {
/* MEI bus API*/
-/**
- * struct mei_cl_ops - MEI CL device ops
- * This structure allows ME host clients to implement technology
- * specific operations.
- *
- * @enable: Enable an MEI CL device. Some devices require specific
- * HECI commands to initialize completely.
- * @disable: Disable an MEI CL device.
- * @send: Tx hook for the device. This allows ME host clients to trap
- * the device driver buffers before actually physically
- * pushing it to the ME.
- * @recv: Rx hook for the device. This allows ME host clients to trap the
- * ME buffers before forwarding them to the device driver.
- */
-struct mei_cl_ops {
- int (*enable)(struct mei_cl_device *device);
- int (*disable)(struct mei_cl_device *device);
- int (*send)(struct mei_cl_device *device, u8 *buf, size_t length);
- int (*recv)(struct mei_cl_device *device, u8 *buf, size_t length);
-};
-
struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
- uuid_le uuid, char *name,
- struct mei_cl_ops *ops);
+ struct mei_me_client *me_cl,
+ struct mei_cl *cl,
+ char *name);
void mei_cl_remove_device(struct mei_cl_device *device);
-ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);
-ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);
+ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ bool blocking);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
void mei_cl_bus_rx_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *dev);
@@ -358,51 +345,21 @@ int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
-
-/**
- * struct mei_cl_device - MEI device handle
- * An mei_cl_device pointer is returned from mei_add_device()
- * and links MEI bus clients to their actual ME host client pointer.
- * Drivers for MEI devices will get an mei_cl_device pointer
- * when being probed and shall use it for doing ME bus I/O.
- *
- * @dev: linux driver model device pointer
- * @cl: mei client
- * @ops: ME transport ops
- * @event_work: async work to execute event callback
- * @event_cb: Drivers register this callback to get asynchronous ME
- * events (e.g. Rx buffer pending) notifications.
- * @event_context: event callback run context
- * @events: Events bitmask sent to the driver.
- * @priv_data: client private data
- */
-struct mei_cl_device {
- struct device dev;
-
- struct mei_cl *cl;
-
- const struct mei_cl_ops *ops;
-
- struct work_struct event_work;
- mei_cl_event_cb_t event_cb;
- void *event_context;
- unsigned long events;
-
- void *priv_data;
-};
-
-
/**
* enum mei_pg_event - power gating transition events
*
* @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
* @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
* @MEI_PG_EVENT_RECEIVED: the driver received pg event
+ * @MEI_PG_EVENT_INTR_WAIT: the driver is waiting for a pg event interrupt
+ * @MEI_PG_EVENT_INTR_RECEIVED: the driver received pg event interrupt
*/
enum mei_pg_event {
MEI_PG_EVENT_IDLE,
MEI_PG_EVENT_WAIT,
MEI_PG_EVENT_RECEIVED,
+ MEI_PG_EVENT_INTR_WAIT,
+ MEI_PG_EVENT_INTR_RECEIVED,
};
/**
@@ -467,6 +424,8 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @host_clients_map : host clients id pool
* @me_client_index : last FW client index in enumeration
*
+ * @allow_fixed_address: allow user space to connect to a fixed address client
+ *
* @wd_cl : watchdog client
* @wd_state : watchdog client state
* @wd_pending : watchdog command is pending
@@ -479,7 +438,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @iamthif_cl : amthif host client
* @iamthif_current_cb : amthif current operation callback
* @iamthif_open_count : number of opened amthif connections
- * @iamthif_mtu : amthif client max message length
* @iamthif_timer : time stamp of current amthif command completion
* @iamthif_stall_timer : timer to detect amthif hang
* @iamthif_state : amthif processor state
@@ -558,6 +516,8 @@ struct mei_device {
DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
unsigned long me_client_index;
+ u32 allow_fixed_address;
+
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
bool wd_pending;
@@ -573,7 +533,6 @@ struct mei_device {
struct mei_cl iamthif_cl;
struct mei_cl_cb *iamthif_current_cb;
long iamthif_open_count;
- int iamthif_mtu;
unsigned long iamthif_timer;
u32 iamthif_stall_timer;
enum iamthif_states iamthif_state;
@@ -652,7 +611,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
*/
void mei_amthif_reset_params(struct mei_device *dev);
-int mei_amthif_host_init(struct mei_device *dev);
+int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
int mei_amthif_read(struct mei_device *dev, struct file *file,
char __user *ubuf, size_t length, loff_t *offset);
@@ -679,7 +638,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
/*
* NFC functions
*/
-int mei_nfc_host_init(struct mei_device *dev);
+int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
void mei_nfc_host_exit(struct mei_device *dev);
/*
@@ -689,7 +648,7 @@ extern const uuid_le mei_nfc_guid;
int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev);
+int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
/*
* mei_watchdog_register - Registering watchdog interface
* once we got connection to the WD Client
@@ -717,6 +676,11 @@ static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
return dev->ops->pg_state(dev);
}
+static inline bool mei_pg_in_transition(struct mei_device *dev)
+{
+ return dev->ops->pg_in_transition(dev);
+}
+
static inline bool mei_pg_is_enabled(struct mei_device *dev)
{
return dev->ops->pg_is_enabled(dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index c3bcb63686d7..b983c4ecad38 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -91,30 +91,25 @@ struct mei_nfc_hci_hdr {
/**
* struct mei_nfc_dev - NFC mei device
*
+ * @me_cl: NFC me client
* @cl: NFC host client
* @cl_info: NFC info host client
* @init_work: perform connection to the info client
- * @send_wq: send completion wait queue
* @fw_ivn: NFC Interface Version Number
* @vendor_id: NFC manufacturer ID
* @radio_type: NFC radio type
* @bus_name: bus name
*
- * @req_id: message counter
- * @recv_req_id: reception message counter
*/
struct mei_nfc_dev {
+ struct mei_me_client *me_cl;
struct mei_cl *cl;
struct mei_cl *cl_info;
struct work_struct init_work;
- wait_queue_head_t send_wq;
u8 fw_ivn;
u8 vendor_id;
u8 radio_type;
char *bus_name;
-
- u16 req_id;
- u16 recv_req_id;
};
/* UUIDs for NFC F/W clients */
@@ -151,6 +146,7 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
kfree(ndev->cl_info);
}
+ mei_me_cl_put(ndev->me_cl);
kfree(ndev);
}
@@ -199,73 +195,6 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
return 0;
}
-static int mei_nfc_connect(struct mei_nfc_dev *ndev)
-{
- struct mei_device *dev;
- struct mei_cl *cl;
- struct mei_nfc_cmd *cmd, *reply;
- struct mei_nfc_connect *connect;
- struct mei_nfc_connect_resp *connect_resp;
- size_t connect_length, connect_resp_length;
- int bytes_recv, ret;
-
- cl = ndev->cl;
- dev = cl->dev;
-
- connect_length = sizeof(struct mei_nfc_cmd) +
- sizeof(struct mei_nfc_connect);
-
- connect_resp_length = sizeof(struct mei_nfc_cmd) +
- sizeof(struct mei_nfc_connect_resp);
-
- cmd = kzalloc(connect_length, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
- connect = (struct mei_nfc_connect *)cmd->data;
-
- reply = kzalloc(connect_resp_length, GFP_KERNEL);
- if (!reply) {
- kfree(cmd);
- return -ENOMEM;
- }
-
- connect_resp = (struct mei_nfc_connect_resp *)reply->data;
-
- cmd->command = MEI_NFC_CMD_MAINTENANCE;
- cmd->data_size = 3;
- cmd->sub_command = MEI_NFC_SUBCMD_CONNECT;
- connect->fw_ivn = ndev->fw_ivn;
- connect->vendor_id = ndev->vendor_id;
-
- ret = __mei_cl_send(cl, (u8 *)cmd, connect_length);
- if (ret < 0) {
- dev_err(dev->dev, "Could not send connect cmd\n");
- goto err;
- }
-
- bytes_recv = __mei_cl_recv(cl, (u8 *)reply, connect_resp_length);
- if (bytes_recv < 0) {
- dev_err(dev->dev, "Could not read connect response\n");
- ret = bytes_recv;
- goto err;
- }
-
- dev_info(dev->dev, "IVN 0x%x Vendor ID 0x%x\n",
- connect_resp->fw_ivn, connect_resp->vendor_id);
-
- dev_info(dev->dev, "ME FW %d.%d.%d.%d\n",
- connect_resp->me_major, connect_resp->me_minor,
- connect_resp->me_hotfix, connect_resp->me_build);
-
- ret = 0;
-
-err:
- kfree(reply);
- kfree(cmd);
-
- return ret;
-}
-
static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
{
struct mei_device *dev;
@@ -285,7 +214,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
cmd.data_size = 1;
cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd));
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
if (ret < 0) {
dev_err(dev->dev, "Could not send IF version cmd\n");
return ret;
@@ -317,106 +246,13 @@ err:
return ret;
}
-static int mei_nfc_enable(struct mei_cl_device *cldev)
-{
- struct mei_device *dev;
- struct mei_nfc_dev *ndev;
- int ret;
-
- ndev = (struct mei_nfc_dev *)cldev->priv_data;
- dev = ndev->cl->dev;
-
- ret = mei_nfc_connect(ndev);
- if (ret < 0) {
- dev_err(dev->dev, "Could not connect to NFC");
- return ret;
- }
-
- return 0;
-}
-
-static int mei_nfc_disable(struct mei_cl_device *cldev)
-{
- return 0;
-}
-
-static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
-{
- struct mei_device *dev;
- struct mei_nfc_dev *ndev;
- struct mei_nfc_hci_hdr *hdr;
- u8 *mei_buf;
- int err;
-
- ndev = (struct mei_nfc_dev *) cldev->priv_data;
- dev = ndev->cl->dev;
-
- err = -ENOMEM;
- mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
- if (!mei_buf)
- goto out;
-
- hdr = (struct mei_nfc_hci_hdr *) mei_buf;
- hdr->cmd = MEI_NFC_CMD_HCI_SEND;
- hdr->status = 0;
- hdr->req_id = ndev->req_id;
- hdr->reserved = 0;
- hdr->data_size = length;
-
- memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
- err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
- if (err < 0)
- goto out;
-
- if (!wait_event_interruptible_timeout(ndev->send_wq,
- ndev->recv_req_id == ndev->req_id, HZ)) {
- dev_err(dev->dev, "NFC MEI command timeout\n");
- err = -ETIME;
- } else {
- ndev->req_id++;
- }
-out:
- kfree(mei_buf);
- return err;
-}
-
-static int mei_nfc_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
-{
- struct mei_nfc_dev *ndev;
- struct mei_nfc_hci_hdr *hci_hdr;
- int received_length;
-
- ndev = (struct mei_nfc_dev *)cldev->priv_data;
-
- received_length = __mei_cl_recv(ndev->cl, buf, length);
- if (received_length < 0)
- return received_length;
-
- hci_hdr = (struct mei_nfc_hci_hdr *) buf;
-
- if (hci_hdr->cmd == MEI_NFC_CMD_HCI_SEND) {
- ndev->recv_req_id = hci_hdr->req_id;
- wake_up(&ndev->send_wq);
-
- return 0;
- }
-
- return received_length;
-}
-
-static struct mei_cl_ops nfc_ops = {
- .enable = mei_nfc_enable,
- .disable = mei_nfc_disable,
- .send = mei_nfc_send,
- .recv = mei_nfc_recv,
-};
-
static void mei_nfc_init(struct work_struct *work)
{
struct mei_device *dev;
struct mei_cl_device *cldev;
struct mei_nfc_dev *ndev;
struct mei_cl *cl_info;
+ struct mei_me_client *me_cl_info;
ndev = container_of(work, struct mei_nfc_dev, init_work);
@@ -425,13 +261,22 @@ static void mei_nfc_init(struct work_struct *work)
mutex_lock(&dev->device_lock);
- if (mei_cl_connect(cl_info, NULL) < 0) {
+ /* check for valid client id */
+ me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
+ if (!me_cl_info) {
+ mutex_unlock(&dev->device_lock);
+ dev_info(dev->dev, "nfc: failed to find the info client\n");
+ goto err;
+ }
+
+ if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) {
+ mei_me_cl_put(me_cl_info);
mutex_unlock(&dev->device_lock);
dev_err(dev->dev, "Could not connect to the NFC INFO ME client");
goto err;
}
-
+ mei_me_cl_put(me_cl_info);
mutex_unlock(&dev->device_lock);
if (mei_nfc_if_version(ndev) < 0) {
@@ -459,7 +304,8 @@ static void mei_nfc_init(struct work_struct *work)
return;
}
- cldev = mei_cl_add_device(dev, mei_nfc_guid, ndev->bus_name, &nfc_ops);
+ cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl,
+ ndev->bus_name);
if (!cldev) {
dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n");
@@ -479,11 +325,10 @@ err:
}
-int mei_nfc_host_init(struct mei_device *dev)
+int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
{
struct mei_nfc_dev *ndev;
struct mei_cl *cl_info, *cl;
- struct mei_me_client *me_cl = NULL;
int ret;
@@ -500,11 +345,9 @@ int mei_nfc_host_init(struct mei_device *dev)
goto err;
}
- /* check for valid client id */
- me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
- if (!me_cl) {
- dev_info(dev->dev, "nfc: failed to find the client\n");
- ret = -ENOTTY;
+ ndev->me_cl = mei_me_cl_get(me_cl);
+ if (!ndev->me_cl) {
+ ret = -ENODEV;
goto err;
}
@@ -514,48 +357,26 @@ int mei_nfc_host_init(struct mei_device *dev)
goto err;
}
- cl_info->me_client_id = me_cl->client_id;
- cl_info->cl_uuid = me_cl->props.protocol_name;
- mei_me_cl_put(me_cl);
- me_cl = NULL;
-
list_add_tail(&cl_info->device_link, &dev->device_list);
ndev->cl_info = cl_info;
- /* check for valid client id */
- me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
- if (!me_cl) {
- dev_info(dev->dev, "nfc: failed to find the client\n");
- ret = -ENOTTY;
- goto err;
- }
-
cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
if (IS_ERR(cl)) {
ret = PTR_ERR(cl);
goto err;
}
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
- mei_me_cl_put(me_cl);
- me_cl = NULL;
-
list_add_tail(&cl->device_link, &dev->device_list);
ndev->cl = cl;
- ndev->req_id = 1;
-
INIT_WORK(&ndev->init_work, mei_nfc_init);
- init_waitqueue_head(&ndev->send_wq);
schedule_work(&ndev->init_work);
return 0;
err:
- mei_me_cl_put(me_cl);
mei_nfc_free(ndev);
return ret;
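
The nfc.c rework pins the ME client once with mei_me_cl_get() and drops it in mei_nfc_free(), instead of re-resolving UUIDs on every use. The lifetime management this leans on is ordinary kref counting, roughly as below (a sketch; the real helpers live in drivers/misc/mei/client.c, and the struct carries a kref not shown in the hunks above):

struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
        if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
                return me_cl;
        return NULL;
}

static void mei_me_cl_release(struct kref *ref)
{
        struct mei_me_client *me_cl =
                container_of(ref, struct mei_me_client, refcnt);

        kfree(me_cl);
}

void mei_me_cl_put(struct mei_me_client *me_cl)
{
        if (me_cl)
                kref_put(&me_cl->refcnt, mei_me_cl_release);
}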
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index dcfcba44b6f7..0882c0201907 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -338,7 +338,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
* However if device is not wakeable we do not enter
* D-low state and we need to keep the interrupt kicking
*/
- if (!ret && pci_dev_run_wake(pdev))
+ if (!ret && pci_dev_run_wake(pdev))
mei_disable_interrupts(dev);
dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 2725f865c3d6..2bc0f5089f82 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -50,15 +50,15 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
* mei_wd_host_init - connect to the watchdog client
*
* @dev: the device structure
+ * @me_cl: me client
*
* Return: -ENOTTY if wd client cannot be found
* -EIO if write has failed
* 0 on success
*/
-int mei_wd_host_init(struct mei_device *dev)
+int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
{
struct mei_cl *cl = &dev->wd_cl;
- struct mei_me_client *me_cl;
int ret;
mei_cl_init(cl, dev);
@@ -66,27 +66,13 @@ int mei_wd_host_init(struct mei_device *dev)
dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
dev->wd_state = MEI_WD_IDLE;
-
- /* check for valid client id */
- me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
- if (!me_cl) {
- dev_info(dev->dev, "wd: failed to find the client\n");
- return -ENOTTY;
- }
-
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
- mei_me_cl_put(me_cl);
-
ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
-
if (ret < 0) {
dev_info(dev->dev, "wd: failed link client\n");
return ret;
}
- ret = mei_cl_connect(cl, NULL);
-
+ ret = mei_cl_connect(cl, me_cl, NULL);
if (ret) {
dev_err(dev->dev, "wd: failed to connect = %d\n", ret);
mei_cl_unlink(cl);
@@ -118,7 +104,7 @@ int mei_wd_send(struct mei_device *dev)
int ret;
hdr.host_addr = cl->host_client_id;
- hdr.me_addr = cl->me_client_id;
+ hdr.me_addr = mei_cl_me_id(cl);
hdr.msg_complete = 1;
hdr.reserved = 0;
hdr.internal = 0;
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index cc4eef040c14..e9f2f56c370d 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -15,11 +15,28 @@ config INTEL_MIC_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+comment "SCIF Bus Driver"
+
+config SCIF_BUS
+ tristate "SCIF Bus Driver"
+ depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ help
+ This option is selected by any driver which registers a
+ device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
+ and CONFIG_INTEL_MIC_CARD.
+
+ If you are building a host/card kernel with an Intel MIC device
+ then say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
+
comment "Intel MIC Host Driver"
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
- depends on 64BIT && PCI && X86 && INTEL_MIC_BUS
+ depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS
select VHOST_RING
help
This enables Host Driver support for the Intel Many Integrated
@@ -39,7 +56,7 @@ comment "Intel MIC Card Driver"
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
- depends on 64BIT && X86 && INTEL_MIC_BUS
+ depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS
select VIRTIO
help
This enables card driver support for the Intel Many Integrated
@@ -52,3 +69,22 @@ config INTEL_MIC_CARD
For more information see
<http://software.intel.com/en-us/mic-developer>.
+
+comment "SCIF Driver"
+
+config SCIF
+ tristate "SCIF Driver"
+ depends on 64BIT && PCI && X86 && SCIF_BUS
+ help
+ This enables SCIF Driver support for the Intel Many Integrated
+ Core (MIC) family of PCIe form factor coprocessor devices that
+ run a 64 bit Linux OS. The Symmetric Communication Interface
+	  (SCIF, pronounced "skiff") is a low-level communications API
+ across PCIe currently implemented for MIC.
+
+ If you are building a host kernel with an Intel MIC device then
+ say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index e9bf148755e2..a74042c58649 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_INTEL_MIC_HOST) += host/
obj-$(CONFIG_INTEL_MIC_CARD) += card/
-obj-$(CONFIG_INTEL_MIC_BUS) += bus/
+obj-y += bus/
+obj-$(CONFIG_SCIF) += scif/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
index d85c7f2a0af4..1ed37e234c96 100644
--- a/drivers/misc/mic/bus/Makefile
+++ b/drivers/misc/mic/bus/Makefile
@@ -3,3 +3,4 @@
# Copyright(c) 2014, Intel Corporation.
#
obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
+obj-$(CONFIG_SCIF_BUS) += scif_bus.o
diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c
new file mode 100644
index 000000000000..2da7ceed015d
--- /dev/null
+++ b/drivers/misc/mic/bus/scif_bus.c
@@ -0,0 +1,210 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel Symmetric Communications Interface Bus driver.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/dma-mapping.h>
+
+#include "scif_bus.h"
+
+static ssize_t device_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.device);
+}
+
+static DEVICE_ATTR_RO(device);
+
+static ssize_t vendor_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.vendor);
+}
+
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t modalias_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+
+ return sprintf(buf, "scif:d%08Xv%08X\n",
+ dev->id.device, dev->id.vendor);
+}
+
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *scif_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scif_dev);
+
+static inline int scif_id_match(const struct scif_hw_dev *dev,
+ const struct scif_hw_dev_id *id)
+{
+ if (id->device != dev->id.device && id->device != SCIF_DEV_ANY_ID)
+ return 0;
+
+ return id->vendor == SCIF_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/*
+ * This looks through all the IDs a driver claims to support. If any of them
+ * match, we return 1 and the kernel will call scif_dev_probe().
+ */
+static int scif_dev_match(struct device *dv, struct device_driver *dr)
+{
+ unsigned int i;
+ struct scif_hw_dev *dev = dev_to_scif(dv);
+ const struct scif_hw_dev_id *ids;
+
+ ids = drv_to_scif(dr)->id_table;
+ for (i = 0; ids[i].device; i++)
+ if (scif_id_match(dev, &ids[i]))
+ return 1;
+ return 0;
+}
+
+static int scif_uevent(struct device *dv, struct kobj_uevent_env *env)
+{
+ struct scif_hw_dev *dev = dev_to_scif(dv);
+
+ return add_uevent_var(env, "MODALIAS=scif:d%08Xv%08X",
+ dev->id.device, dev->id.vendor);
+}
+
+static int scif_dev_probe(struct device *d)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+ struct scif_driver *drv = drv_to_scif(dev->dev.driver);
+
+ return drv->probe(dev);
+}
+
+static int scif_dev_remove(struct device *d)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+ struct scif_driver *drv = drv_to_scif(dev->dev.driver);
+
+ drv->remove(dev);
+ return 0;
+}
+
+static struct bus_type scif_bus = {
+ .name = "scif_bus",
+ .match = scif_dev_match,
+ .dev_groups = scif_dev_groups,
+ .uevent = scif_uevent,
+ .probe = scif_dev_probe,
+ .remove = scif_dev_remove,
+};
+
+int scif_register_driver(struct scif_driver *driver)
+{
+ driver->driver.bus = &scif_bus;
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(scif_register_driver);
+
+void scif_unregister_driver(struct scif_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(scif_unregister_driver);
+
+static void scif_release_dev(struct device *d)
+{
+ struct scif_hw_dev *sdev = dev_to_scif(d);
+
+ kfree(sdev);
+}
+
+struct scif_hw_dev *
+scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+ struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
+ struct mic_mw *mmio, struct mic_mw *aper, void *dp,
+ void __iomem *rdp, struct dma_chan **chan, int num_chan)
+{
+ int ret;
+ struct scif_hw_dev *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return ERR_PTR(-ENOMEM);
+
+ sdev->dev.parent = pdev;
+ sdev->id.device = id;
+ sdev->id.vendor = SCIF_DEV_ANY_ID;
+ sdev->dev.archdata.dma_ops = dma_ops;
+ sdev->dev.release = scif_release_dev;
+ sdev->hw_ops = hw_ops;
+ sdev->dnode = dnode;
+ sdev->snode = snode;
+ dev_set_drvdata(&sdev->dev, sdev);
+ sdev->dev.bus = &scif_bus;
+ sdev->mmio = mmio;
+ sdev->aper = aper;
+ sdev->dp = dp;
+ sdev->rdp = rdp;
+ sdev->dev.dma_mask = &sdev->dev.coherent_dma_mask;
+ dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
+ sdev->dma_ch = chan;
+ sdev->num_dma_ch = num_chan;
+ dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
+ /*
+ * device_register() causes the bus infrastructure to look for a
+ * matching driver.
+ */
+ ret = device_register(&sdev->dev);
+ if (ret)
+ goto free_sdev;
+ return sdev;
+free_sdev:
+ kfree(sdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(scif_register_device);
+
+void scif_unregister_device(struct scif_hw_dev *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(scif_unregister_device);
+
+static int __init scif_init(void)
+{
+ return bus_register(&scif_bus);
+}
+
+static void __exit scif_exit(void)
+{
+ bus_unregister(&scif_bus);
+}
+
+core_initcall(scif_init);
+module_exit(scif_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) SCIF Bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
new file mode 100644
index 000000000000..335a228a8236
--- /dev/null
+++ b/drivers/misc/mic/bus/scif_bus.h
@@ -0,0 +1,129 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel Symmetric Communications Interface Bus driver.
+ */
+#ifndef _SCIF_BUS_H_
+#define _SCIF_BUS_H_
+/*
+ * Everything a scif driver needs to work with any particular scif
+ * hardware abstraction layer.
+ */
+#include <linux/dma-mapping.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+struct scif_hw_dev_id {
+ u32 device;
+ u32 vendor;
+};
+
+#define MIC_SCIF_DEV 1
+#define SCIF_DEV_ANY_ID 0xffffffff
+
+/**
+ * scif_hw_dev - representation of a hardware device abstracted for scif
+ * @hw_ops: the hardware ops supported by this device
+ * @id: the device type identification (used to match it with a driver)
+ * @mmio: MMIO memory window
+ * @aper: Aperture memory window
+ * @dev: underlying device
+ * @dnode: The destination node which this device will communicate with.
+ * @snode: The source node for this device.
+ * @dp: Self device page
+ * @rdp: Remote device page
+ * @dma_ch: Array of DMA channels
+ * @num_dma_ch: Number of DMA channels available
+ */
+struct scif_hw_dev {
+ struct scif_hw_ops *hw_ops;
+ struct scif_hw_dev_id id;
+ struct mic_mw *mmio;
+ struct mic_mw *aper;
+ struct device dev;
+ u8 dnode;
+ u8 snode;
+ void *dp;
+ void __iomem *rdp;
+ struct dma_chan **dma_ch;
+ int num_dma_ch;
+};
+
+/**
+ * scif_driver - operations for a scif I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct scif_driver {
+ struct device_driver driver;
+ const struct scif_hw_dev_id *id_table;
+ int (*probe)(struct scif_hw_dev *dev);
+ void (*remove)(struct scif_hw_dev *dev);
+};
+
+/**
+ * scif_hw_ops - Hardware operations for accessing a SCIF device on the SCIF bus.
+ *
+ * @next_db: Obtain the next available doorbell.
+ * @request_irq: Request an interrupt on a particular doorbell.
+ * @free_irq: Free an interrupt requested previously.
+ * @ack_interrupt: acknowledge an interrupt in the ISR.
+ * @send_intr: Send an interrupt to the remote node on a specified doorbell.
+ * @send_p2p_intr: Send an interrupt to a peer node on a doorbell that is
+ * reserved for peer-to-peer traffic.
+ * @ioremap: Map a buffer with the specified physical address and length.
+ * @iounmap: Unmap a buffer previously mapped.
+ */
+struct scif_hw_ops {
+ int (*next_db)(struct scif_hw_dev *sdev);
+ struct mic_irq * (*request_irq)(struct scif_hw_dev *sdev,
+ irqreturn_t (*func)(int irq,
+ void *data),
+ const char *name, void *data,
+ int db);
+ void (*free_irq)(struct scif_hw_dev *sdev,
+ struct mic_irq *cookie, void *data);
+ void (*ack_interrupt)(struct scif_hw_dev *sdev, int num);
+ void (*send_intr)(struct scif_hw_dev *sdev, int db);
+ void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
+ struct mic_mw *mw);
+ void __iomem * (*ioremap)(struct scif_hw_dev *sdev,
+ phys_addr_t pa, size_t len);
+ void (*iounmap)(struct scif_hw_dev *sdev, void __iomem *va);
+};
+
+int scif_register_driver(struct scif_driver *driver);
+void scif_unregister_driver(struct scif_driver *driver);
+struct scif_hw_dev *
+scif_register_device(struct device *pdev, int id,
+ struct dma_map_ops *dma_ops,
+ struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
+ struct mic_mw *mmio, struct mic_mw *aper,
+ void *dp, void __iomem *rdp,
+ struct dma_chan **chan, int num_chan);
+void scif_unregister_device(struct scif_hw_dev *sdev);
+
+static inline struct scif_hw_dev *dev_to_scif(struct device *dev)
+{
+ return container_of(dev, struct scif_hw_dev, dev);
+}
+
+static inline struct scif_driver *drv_to_scif(struct device_driver *drv)
+{
+ return container_of(drv, struct scif_driver, driver);
+}
+#endif /* _SCIF_BUS_H */
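
To see the new bus end to end, here is a minimal hypothetical client driver; every my_* name is illustrative, and only the scif_* API comes from the header above:

/* Hypothetical SCIF bus client: binds to any MIC_SCIF_DEV device. */
#include <linux/module.h>
#include "scif_bus.h"

static const struct scif_hw_dev_id my_id_table[] = {
        { MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
        { 0 },
};

static int my_probe(struct scif_hw_dev *sdev)
{
        /* ask the hardware layer for the next free doorbell */
        int db = sdev->hw_ops->next_db(sdev);

        dev_info(&sdev->dev, "bound, next doorbell %d\n", db);
        return 0;
}

static void my_remove(struct scif_hw_dev *sdev)
{
        dev_info(&sdev->dev, "unbound\n");
}

static struct scif_driver my_scif_driver = {
        .driver.name = "my_scif_client",
        .driver.owner = THIS_MODULE,
        .id_table = my_id_table,
        .probe = my_probe,
        .remove = my_remove,
};

module_driver(my_scif_driver, scif_register_driver, scif_unregister_driver);

MODULE_LICENSE("GPL v2");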
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index 83819eee553b..6338908b2252 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -28,6 +28,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
+#include <linux/dmaengine.h>
+#include <linux/kmod.h>
#include <linux/mic_common.h>
#include "../common/mic_dev.h"
@@ -240,6 +242,111 @@ static void mic_uninit_irq(void)
kfree(mdrv->irq_info.irq_usage_count);
}
+static inline struct mic_driver *scdev_to_mdrv(struct scif_hw_dev *scdev)
+{
+ return dev_get_drvdata(scdev->dev.parent);
+}
+
+static struct mic_irq *
+___mic_request_irq(struct scif_hw_dev *scdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data,
+ int db)
+{
+ return mic_request_card_irq(func, NULL, name, data, db);
+}
+
+static void
+___mic_free_irq(struct scif_hw_dev *scdev,
+ struct mic_irq *cookie, void *data)
+{
+ return mic_free_card_irq(cookie, data);
+}
+
+static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ mic_ack_interrupt(&mdrv->mdev);
+}
+
+static int ___mic_next_db(struct scif_hw_dev *scdev)
+{
+ return mic_next_card_db();
+}
+
+static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ mic_send_intr(&mdrv->mdev, db);
+}
+
+static void ___mic_send_p2p_intr(struct scif_hw_dev *scdev, int db,
+ struct mic_mw *mw)
+{
+ mic_send_p2p_intr(db, mw);
+}
+
+static void __iomem *
+___mic_ioremap(struct scif_hw_dev *scdev,
+ phys_addr_t pa, size_t len)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ return mic_card_map(&mdrv->mdev, pa, len);
+}
+
+static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ mic_card_unmap(&mdrv->mdev, va);
+}
+
+static struct scif_hw_ops scif_hw_ops = {
+ .request_irq = ___mic_request_irq,
+ .free_irq = ___mic_free_irq,
+ .ack_interrupt = ___mic_ack_interrupt,
+ .next_db = ___mic_next_db,
+ .send_intr = ___mic_send_intr,
+ .send_p2p_intr = ___mic_send_p2p_intr,
+ .ioremap = ___mic_ioremap,
+ .iounmap = ___mic_iounmap,
+};
+
+static int mic_request_dma_chans(struct mic_driver *mdrv)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+
+ request_module("mic_x100_dma");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ do {
+ chan = dma_request_channel(mask, NULL, NULL);
+ if (chan) {
+ mdrv->dma_ch[mdrv->num_dma_ch++] = chan;
+ if (mdrv->num_dma_ch >= MIC_MAX_DMA_CHAN)
+ break;
+ }
+ } while (chan);
+ dev_info(mdrv->dev, "DMA channels # %d\n", mdrv->num_dma_ch);
+ return mdrv->num_dma_ch;
+}
+
+static void mic_free_dma_chans(struct mic_driver *mdrv)
+{
+ int i = 0;
+
+ for (i = 0; i < mdrv->num_dma_ch; i++) {
+ dma_release_channel(mdrv->dma_ch[i]);
+ mdrv->dma_ch[i] = NULL;
+ }
+ mdrv->num_dma_ch = 0;
+}
+
/*
* mic_driver_init - MIC driver initialization tasks.
*
@@ -248,6 +355,8 @@ static void mic_uninit_irq(void)
int __init mic_driver_init(struct mic_driver *mdrv)
{
int rc;
+ struct mic_bootparam __iomem *bootparam;
+ u8 node_id;
g_drv = mdrv;
/*
@@ -268,13 +377,32 @@ int __init mic_driver_init(struct mic_driver *mdrv)
rc = mic_shutdown_init();
if (rc)
goto irq_uninit;
+ if (!mic_request_dma_chans(mdrv)) {
+ rc = -ENODEV;
+ goto shutdown_uninit;
+ }
rc = mic_devices_init(mdrv);
if (rc)
- goto shutdown_uninit;
+ goto dma_free;
+ bootparam = mdrv->dp;
+ node_id = ioread8(&bootparam->node_id);
+ mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
+ NULL, &scif_hw_ops,
+ 0, node_id, &mdrv->mdev.mmio, NULL,
+ NULL, mdrv->dp, mdrv->dma_ch,
+ mdrv->num_dma_ch);
+ if (IS_ERR(mdrv->scdev)) {
+ rc = PTR_ERR(mdrv->scdev);
+ goto device_uninit;
+ }
mic_create_card_debug_dir(mdrv);
atomic_notifier_chain_register(&panic_notifier_list, &mic_panic);
done:
return rc;
+device_uninit:
+ mic_devices_uninit(mdrv);
+dma_free:
+ mic_free_dma_chans(mdrv);
shutdown_uninit:
mic_shutdown_uninit();
irq_uninit:
@@ -294,7 +422,9 @@ put:
void mic_driver_uninit(struct mic_driver *mdrv)
{
mic_delete_card_debug_dir(mdrv);
+ scif_unregister_device(mdrv->scdev);
mic_devices_uninit(mdrv);
+ mic_free_dma_chans(mdrv);
/*
* Inform the host about the shutdown status i.e. poweroff/restart etc.
* The module cannot be unloaded so the only code path to call
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 844be8fc9b22..1dbf83c41289 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -29,9 +29,9 @@
#include <linux/workqueue.h>
#include <linux/io.h>
-#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/mic_bus.h>
+#include "../bus/scif_bus.h"
/**
* struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -73,6 +73,9 @@ struct mic_device {
* @irq_info: The OS specific irq information
* @intr_info: H/W specific interrupt information.
* @dma_mbdev: dma device on the MIC virtual bus.
+ * @dma_ch: Array of DMA channels
+ * @num_dma_ch: Number of DMA channels available
+ * @scdev: SCIF device on the SCIF virtual bus.
*/
struct mic_driver {
char name[20];
@@ -84,6 +87,9 @@ struct mic_driver {
struct mic_irq_info irq_info;
struct mic_intr_info intr_info;
struct mbus_device *dma_mbdev;
+ struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
+ int num_dma_ch;
+ struct scif_hw_dev *scdev;
};
/**
@@ -122,10 +128,11 @@ void mic_driver_uninit(struct mic_driver *mdrv);
int mic_next_card_db(void);
struct mic_irq *
mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src);
+ const char *name, void *data, int db);
void mic_free_card_irq(struct mic_irq *cookie, void *data);
u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
void mic_send_intr(struct mic_device *mdev, int doorbell);
+void mic_send_p2p_intr(int doorbell, struct mic_mw *mw);
int mic_db_to_irq(struct mic_driver *mdrv, int db);
u32 mic_ack_interrupt(struct mic_device *mdev);
void mic_hw_intr_init(struct mic_driver *mdrv);
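
Everything card-side goes through the scif_hw_ops wired up in mic_device.c above, so the SCIF core never touches MIC registers directly. A hedged sketch of the consumer-side calling pattern; the my_* names are illustrative, and the ERR_PTR convention on request_irq is an assumption:

/* Illustrative pattern: grab a doorbell, hook an interrupt on it,
 * then ring the remote side. Error handling trimmed for brevity.
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include "../bus/scif_bus.h"

struct my_db_ctx {
        struct scif_hw_dev *sdev;
        int db;
};

static irqreturn_t my_db_handler(int irq, void *data)
{
        struct my_db_ctx *ctx = data;

        ctx->sdev->hw_ops->ack_interrupt(ctx->sdev, ctx->db);
        return IRQ_HANDLED;
}

static int my_setup_doorbell(struct scif_hw_dev *sdev)
{
        struct my_db_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        struct mic_irq *cookie;

        if (!ctx)
                return -ENOMEM;
        ctx->sdev = sdev;
        ctx->db = sdev->hw_ops->next_db(sdev);

        cookie = sdev->hw_ops->request_irq(sdev, my_db_handler,
                                           "my-db", ctx, ctx->db);
        if (IS_ERR(cookie)) {
                kfree(ctx);
                return PTR_ERR(cookie);
        }

        sdev->hw_ops->send_intr(sdev, ctx->db); /* ring the peer */
        return 0;
}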
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index e98e537d68e3..77fd41781c2e 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -70,6 +70,41 @@ void mic_send_intr(struct mic_device *mdev, int doorbell)
(MIC_X100_SBOX_SDBIC0 + (4 * doorbell)));
}
+/*
+ * mic_x100_send_sbox_intr - Send a MIC_X100_SBOX interrupt to MIC.
+ */
+static void mic_x100_send_sbox_intr(struct mic_mw *mw, int doorbell)
+{
+ u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
+ u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
+ apic_icr_offset);
+
+ /* for MIC we need to make sure we "hit" the send_icr bit (13) */
+ apicicr_low = (apicicr_low | (1 << 13));
+ /*
+ * Ensure that the interrupt is ordered w.r.t. previous stores
+ * to main memory. Fence instructions are not implemented in X100
+ * since execution is in order but a compiler barrier is still
+ * required.
+ */
+ wmb();
+ mic_mmio_write(mw, apicicr_low,
+ MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
+}
+
+static void mic_x100_send_rdmasr_intr(struct mic_mw *mw, int doorbell)
+{
+ int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
+ /*
+ * Ensure that the interrupt is ordered w.r.t. previous stores
+ * to main memory. Fence instructions are not implemented in X100
+ * since execution is in order but a compiler barrier is still
+ * required.
+ */
+ wmb();
+ mic_mmio_write(mw, 0, MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
+}
+
/**
* mic_ack_interrupt - Device specific interrupt handling.
* @mdev: pointer to mic_device instance
@@ -91,6 +126,18 @@ static inline int mic_get_rdmasr_irq(int index)
return MIC_X100_RDMASR_IRQ_BASE + index;
}
+void mic_send_p2p_intr(int db, struct mic_mw *mw)
+{
+ int rdmasr_index;
+
+ if (db < MIC_X100_NUM_SBOX_IRQ) {
+ mic_x100_send_sbox_intr(mw, db);
+ } else {
+ rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
+ mic_x100_send_rdmasr_intr(mw, rdmasr_index);
+ }
+}
+
/**
* mic_hw_intr_init - Initialize h/w specific interrupt
* information.
@@ -113,11 +160,15 @@ void mic_hw_intr_init(struct mic_driver *mdrv)
int mic_db_to_irq(struct mic_driver *mdrv, int db)
{
int rdmasr_index;
+
+ /*
+	 * The total number of doorbell interrupts on the card is 16. Indices
+	 * 0-7 fall in the SBOX category and 8-15 fall in the RDMASR category.
+ */
if (db < MIC_X100_NUM_SBOX_IRQ) {
return mic_get_sbox_irq(db);
} else {
- rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ +
- MIC_X100_RDMASR_IRQ_BASE;
+ rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
return mic_get_rdmasr_irq(rdmasr_index);
}
}
@@ -243,10 +294,16 @@ static void mic_platform_shutdown(struct platform_device *pdev)
mic_remove(pdev);
}
+static u64 mic_dma_mask = DMA_BIT_MASK(64);
+
static struct platform_device mic_platform_dev = {
.name = mic_driver_name,
.id = 0,
.num_resources = 0,
+ .dev = {
+ .dma_mask = &mic_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ },
};
static struct platform_driver __refdata mic_platform_driver = {
diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h
index d66ea55639c3..7e2224934ba8 100644
--- a/drivers/misc/mic/card/mic_x100.h
+++ b/drivers/misc/mic/card/mic_x100.h
@@ -35,6 +35,7 @@
#define MIC_X100_SBOX_SDBIC0 0x0000CC90
#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000
#define MIC_X100_SBOX_RDMASR0 0x0000B180
+#define MIC_X100_SBOX_APICICR0 0x0000A9D0
#define MIC_X100_MAX_DOORBELL_IDX 8
diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h
index 92999c2bbf82..0b58c46045dc 100644
--- a/drivers/misc/mic/common/mic_dev.h
+++ b/drivers/misc/mic/common/mic_dev.h
@@ -48,4 +48,7 @@ struct mic_mw {
#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1
#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2
+/* Maximum number of DMA channels */
+#define MIC_MAX_DMA_CHAN 4
+
#endif
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index d9fa609da061..e5f6a5e7bca1 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/kmod.h>
#include <linux/mic_common.h>
#include <linux/mic_bus.h>
@@ -29,6 +30,188 @@
#include "mic_smpt.h"
#include "mic_virtio.h"
+static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
+{
+ return dev_get_drvdata(scdev->dev.parent);
+}
+
+static void *__mic_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+ dma_addr_t tmp;
+ void *va = kmalloc(size, gfp);
+
+ if (va) {
+ tmp = mic_map_single(mdev, va, size);
+ if (dma_mapping_error(dev, tmp)) {
+ kfree(va);
+ va = NULL;
+ } else {
+ *dma_handle = tmp;
+ }
+ }
+ return va;
+}
+
+static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mic_unmap_single(mdev, dma_handle, size);
+ kfree(vaddr);
+}
+
+static dma_addr_t
+__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ void *va = phys_to_virt(page_to_phys(page)) + offset;
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_map_single(mdev, va, size);
+}
+
+static void
+__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mic_unmap_single(mdev, dma_addr, size);
+}
+
+static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+ struct scatterlist *s;
+ int i, j, ret;
+ dma_addr_t da;
+
+ ret = dma_map_sg(mdev->sdev->parent, sg, nents, dir);
+ if (ret <= 0)
+ return 0;
+
+ for_each_sg(sg, s, nents, i) {
+ da = mic_map(mdev, sg_dma_address(s) + s->offset, s->length);
+ if (!da)
+ goto err;
+ sg_dma_address(s) = da;
+ }
+ return nents;
+err:
+ for_each_sg(sg, s, i, j) {
+ mic_unmap(mdev, sg_dma_address(s), s->length);
+ sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s));
+ }
+ dma_unmap_sg(mdev->sdev->parent, sg, nents, dir);
+ return 0;
+}
+
+static void __mic_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+ struct scatterlist *s;
+ dma_addr_t da;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ da = mic_to_dma_addr(mdev, sg_dma_address(s));
+ mic_unmap(mdev, sg_dma_address(s), s->length);
+ sg_dma_address(s) = da;
+ }
+ dma_unmap_sg(mdev->sdev->parent, sg, nents, dir);
+}
+
+static struct dma_map_ops __mic_dma_ops = {
+ .alloc = __mic_dma_alloc,
+ .free = __mic_dma_free,
+ .map_page = __mic_dma_map_page,
+ .unmap_page = __mic_dma_unmap_page,
+ .map_sg = __mic_dma_map_sg,
+ .unmap_sg = __mic_dma_unmap_sg,
+};
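
These dma_map_ops back the generic DMA API for the SCIF virtual device, translating card-visible addresses through mic_map_single()/mic_map(). A hedged consumer sketch (example_map is a hypothetical name, not part of this patch):

#include <linux/dma-mapping.h>

/*
 * Map a kernel buffer for card access through the SCIF virtual device;
 * dma_map_single() on &scdev->dev dispatches to __mic_dma_map_page()
 * above. Illustrative only.
 */
static int example_map(struct scif_hw_dev *scdev, void *buf, size_t len,
                       dma_addr_t *da)
{
        *da = dma_map_single(&scdev->dev, buf, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&scdev->dev, *da))
                return -ENOMEM;
        return 0;
}
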
+
+static struct mic_irq *
+___mic_request_irq(struct scif_hw_dev *scdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name,
+ void *data, int db)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_request_threaded_irq(mdev, func, NULL, name, data,
+ db, MIC_INTR_DB);
+}
+
+static void
+___mic_free_irq(struct scif_hw_dev *scdev,
+ struct mic_irq *cookie, void *data)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_free_irq(mdev, cookie, data);
+}
+
+static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mdev->ops->intr_workarounds(mdev);
+}
+
+static int ___mic_next_db(struct scif_hw_dev *scdev)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_next_db(mdev);
+}
+
+static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mdev->ops->send_intr(mdev, db);
+}
+
+static void __iomem *___mic_ioremap(struct scif_hw_dev *scdev,
+ phys_addr_t pa, size_t len)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mdev->aper.va + pa;
+}
+
+static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
+{
+ /* nothing to do */
+}
+
+static struct scif_hw_ops scif_hw_ops = {
+ .request_irq = ___mic_request_irq,
+ .free_irq = ___mic_free_irq,
+ .ack_interrupt = ___mic_ack_interrupt,
+ .next_db = ___mic_next_db,
+ .send_intr = ___mic_send_intr,
+ .ioremap = ___mic_ioremap,
+ .iounmap = ___mic_iounmap,
+};
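
A hedged sketch of how a consumer of these callbacks might claim a doorbell, assuming struct scif_hw_dev exposes them through an hw_ops pointer as declared in scif_bus.h (example_* names are illustrative, not part of this patch):

#include <linux/interrupt.h>

static irqreturn_t example_db_handler(int irq, void *data)
{
        /* a real handler would also ack via hw_ops->ack_interrupt() */
        return IRQ_HANDLED;
}

static int example_claim_db(struct scif_hw_dev *scdev, void *data)
{
        struct mic_irq *cookie;
        int db = scdev->hw_ops->next_db(scdev);

        cookie = scdev->hw_ops->request_irq(scdev, example_db_handler,
                                            "example-db", data, db);
        if (IS_ERR(cookie))
                return PTR_ERR(cookie);
        /* a peer is notified with hw_ops->send_intr(scdev, peer_db) */
        return db;
}
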
+
static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
{
return dev_get_drvdata(mbdev->dev.parent);
@@ -127,6 +310,58 @@ void mic_bootparam_init(struct mic_device *mdev)
bootparam->h2c_config_db = -1;
bootparam->shutdown_status = 0;
bootparam->shutdown_card = 0;
+ /* Total nodes = number of MICs + 1 for self node */
+ bootparam->tot_nodes = atomic_read(&g_num_mics) + 1;
+ bootparam->node_id = mdev->id + 1;
+ bootparam->scif_host_dma_addr = 0x0;
+ bootparam->scif_card_dma_addr = 0x0;
+ bootparam->c2h_scif_db = -1;
+ bootparam->h2c_scif_db = -1;
+}
+
+/**
+ * mic_request_dma_chans - Request DMA channels
+ * @mdev: pointer to mic_device instance
+ *
+ * returns number of DMA channels acquired
+ */
+static int mic_request_dma_chans(struct mic_device *mdev)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+
+ request_module("mic_x100_dma");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ do {
+ chan = dma_request_channel(mask, mdev->ops->dma_filter,
+ mdev->sdev->parent);
+ if (chan) {
+ mdev->dma_ch[mdev->num_dma_ch++] = chan;
+ if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN)
+ break;
+ }
+ } while (chan);
+ dev_info(mdev->sdev->parent, "DMA channels # %d\n", mdev->num_dma_ch);
+ return mdev->num_dma_ch;
+}
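
For context, dma_request_channel() calls the filter once per candidate channel and keeps only those for which it returns true; a MIC-style filter would match channels whose DMA device belongs to this adapter, which is why mdev->sdev->parent is passed as the parameter above. A hedged sketch of such a filter (illustrative, not the actual mic_x100_dma code):

#include <linux/dmaengine.h>

static bool example_dma_filter(struct dma_chan *chan, void *param)
{
        struct device *dev = param;

        /* claim only channels whose DMA device hangs off this adapter */
        return chan->device->dev->parent == dev;
}
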
+
+/**
+ * mic_free_dma_chans - release DMA channels
+ * @mdev: pointer to mic_device instance
+ *
+ * returns none
+ */
+static void mic_free_dma_chans(struct mic_device *mdev)
+{
+ int i = 0;
+
+ for (i = 0; i < mdev->num_dma_ch; i++) {
+ dma_release_channel(mdev->dma_ch[i]);
+ mdev->dma_ch[i] = NULL;
+ }
+ mdev->num_dma_ch = 0;
}
/**
@@ -141,6 +376,7 @@ int mic_start(struct mic_device *mdev, const char *buf)
{
int rc;
mutex_lock(&mdev->mic_mutex);
+ mic_bootparam_init(mdev);
retry:
if (MIC_OFFLINE != mdev->state) {
rc = -EINVAL;
@@ -161,14 +397,22 @@ retry:
rc = PTR_ERR(mdev->dma_mbdev);
goto unlock_ret;
}
- mdev->dma_ch = mic_request_dma_chan(mdev);
- if (!mdev->dma_ch) {
- rc = -ENXIO;
+ if (!mic_request_dma_chans(mdev)) {
+ rc = -ENODEV;
goto dma_remove;
}
+ mdev->scdev = scif_register_device(mdev->sdev->parent, MIC_SCIF_DEV,
+ &__mic_dma_ops, &scif_hw_ops,
+ mdev->id + 1, 0, &mdev->mmio,
+ &mdev->aper, mdev->dp, NULL,
+ mdev->dma_ch, mdev->num_dma_ch);
+ if (IS_ERR(mdev->scdev)) {
+ rc = PTR_ERR(mdev->scdev);
+ goto dma_free;
+ }
rc = mdev->ops->load_mic_fw(mdev, buf);
if (rc)
- goto dma_release;
+ goto scif_remove;
mic_smpt_restore(mdev);
mic_intr_restore(mdev);
mdev->intr_ops->enable_interrupts(mdev);
@@ -177,8 +421,10 @@ retry:
mdev->ops->send_firmware_intr(mdev);
mic_set_state(mdev, MIC_ONLINE);
goto unlock_ret;
-dma_release:
- dma_release_channel(mdev->dma_ch);
+scif_remove:
+ scif_unregister_device(mdev->scdev);
+dma_free:
+ mic_free_dma_chans(mdev);
dma_remove:
mbus_unregister_device(mdev->dma_mbdev);
unlock_ret:
@@ -197,11 +443,9 @@ void mic_stop(struct mic_device *mdev, bool force)
{
mutex_lock(&mdev->mic_mutex);
if (MIC_OFFLINE != mdev->state || force) {
+ scif_unregister_device(mdev->scdev);
mic_virtio_reset_devices(mdev);
- if (mdev->dma_ch) {
- dma_release_channel(mdev->dma_ch);
- mdev->dma_ch = NULL;
- }
+ mic_free_dma_chans(mdev);
mbus_unregister_device(mdev->dma_mbdev);
mic_bootparam_init(mdev);
mic_reset(mdev);
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 687e9aacf3bb..3c9ea4896f3c 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -214,6 +214,19 @@ static int mic_dp_show(struct seq_file *s, void *pos)
bootparam->shutdown_status);
seq_printf(s, "Bootparam: shutdown_card %d\n",
bootparam->shutdown_card);
+ seq_printf(s, "Bootparam: tot_nodes %d\n",
+ bootparam->tot_nodes);
+ seq_printf(s, "Bootparam: node_id %d\n",
+ bootparam->node_id);
+ seq_printf(s, "Bootparam: c2h_scif_db %d\n",
+ bootparam->c2h_scif_db);
+ seq_printf(s, "Bootparam: h2c_scif_db %d\n",
+ bootparam->h2c_scif_db);
+ seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
+ bootparam->scif_host_dma_addr);
+ seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
+ bootparam->scif_card_dma_addr);
+
for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
i += mic_total_desc_size(d)) {
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 016bd15a7bd1..01a7555aa648 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -27,7 +27,7 @@
#include <linux/irqreturn.h>
#include <linux/dmaengine.h>
#include <linux/mic_bus.h>
-
+#include "../bus/scif_bus.h"
#include "mic_intr.h"
/* The maximum number of MIC devices supported in a single host system. */
@@ -90,7 +90,9 @@ enum mic_stepping {
* @vdev_list: list of virtio devices.
* @pm_notifier: Handles PM notifications from the OS.
* @dma_mbdev: MIC BUS DMA device.
- * @dma_ch: DMA channel reserved by this driver for use by virtio devices.
+ * @dma_ch: Array of DMA channels
+ * @num_dma_ch: Number of DMA channels available
+ * @scdev: SCIF device on the SCIF virtual bus.
*/
struct mic_device {
struct mic_mw mmio;
@@ -129,7 +131,9 @@ struct mic_device {
struct list_head vdev_list;
struct notifier_block pm_notifier;
struct mbus_device *dma_mbdev;
- struct dma_chan *dma_ch;
+ struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
+ int num_dma_ch;
+ struct scif_hw_dev *scdev;
};
/**
@@ -228,4 +232,5 @@ void mic_exit_debugfs(void);
void mic_prepare_suspend(struct mic_device *mdev);
void mic_complete_resume(struct mic_device *mdev);
void mic_suspend(struct mic_device *mdev);
+extern atomic_t g_num_mics;
#endif
diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h
index 9f783d4ad7f1..cce28824db8a 100644
--- a/drivers/misc/mic/host/mic_intr.h
+++ b/drivers/misc/mic/host/mic_intr.h
@@ -28,8 +28,9 @@
* 3 for virtio network, console and block devices.
* 1 for card shutdown notifications.
* 4 for host owned DMA channels.
+ * 1 for SCIF
*/
-#define MIC_MIN_MSIX 8
+#define MIC_MIN_MSIX 9
#define MIC_NUM_OFFSETS 32
/**
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index ab37a3117d23..456462932151 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -67,6 +67,8 @@ static struct ida g_mic_ida;
static struct class *g_mic_class;
/* Base device node number for MIC devices */
static dev_t g_mic_devno;
+/* Track the total number of MIC devices */
+atomic_t g_num_mics;
static const struct file_operations mic_fops = {
.open = mic_open,
@@ -408,6 +410,7 @@ static int mic_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc);
goto cleanup_debug_dir;
}
+ atomic_inc(&g_num_mics);
return 0;
cleanup_debug_dir:
mic_delete_debug_dir(mdev);
@@ -459,6 +462,7 @@ static void mic_remove(struct pci_dev *pdev)
return;
mic_stop(mdev, false);
+ atomic_dec(&g_num_mics);
cdev_del(&mdev->cdev);
mic_delete_debug_dir(mdev);
mutex_lock(&mdev->mic_mutex);
@@ -478,6 +482,7 @@ static void mic_remove(struct pci_dev *pdev)
ida_simple_remove(&g_mic_ida, mdev->id);
kfree(mdev);
}
+
static struct pci_driver mic_driver = {
.name = mic_driver_name,
.id_table = mic_pci_tbl,
@@ -512,6 +517,7 @@ static int __init mic_init(void)
}
return ret;
cleanup_debugfs:
+ ida_destroy(&g_mic_ida);
mic_exit_debugfs();
class_destroy(g_mic_class);
cleanup_chrdev:
diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c
index fae474c4899e..cec82034875f 100644
--- a/drivers/misc/mic/host/mic_smpt.c
+++ b/drivers/misc/mic/host/mic_smpt.c
@@ -174,8 +174,7 @@ static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
*
* returns a DMA address.
*/
-static dma_addr_t
-mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
+dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
{
struct mic_smpt_info *smpt_info = mdev->smpt;
int spt;
@@ -214,7 +213,7 @@ dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
if (!size || size > mic_max_system_memory(mdev))
return mic_addr;
- ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL);
+ ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
if (!ref)
return mic_addr;
@@ -271,7 +270,7 @@ void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
}
spt = mic_sys_addr_to_smpt(mdev, mic_addr);
- ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL);
+ ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
if (!ref)
return;
diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h
index 51970abfe7df..68721c6e7455 100644
--- a/drivers/misc/mic/host/mic_smpt.h
+++ b/drivers/misc/mic/host/mic_smpt.h
@@ -78,6 +78,7 @@ void mic_unmap_single(struct mic_device *mdev,
dma_addr_t mic_map(struct mic_device *mdev,
dma_addr_t dma_addr, size_t size);
void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size);
+dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr);
/**
* mic_map_error - Check a MIC address for errors.
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index a020e4eb435a..cc08e9f733c9 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -40,7 +40,7 @@ static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
{
int err = 0;
struct dma_async_tx_descriptor *tx;
- struct dma_chan *mic_ch = mdev->dma_ch;
+ struct dma_chan *mic_ch = mdev->dma_ch[0];
if (!mic_ch) {
err = -EBUSY;
@@ -80,7 +80,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
struct mic_device *mdev = mvdev->mdev;
void __iomem *dbuf = mdev->aper.va + daddr;
struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
+ size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
size_t dma_offset;
size_t partlen;
int err;
@@ -129,7 +129,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
struct mic_device *mdev = mvdev->mdev;
void __iomem *dbuf = mdev->aper.va + daddr;
struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
+ size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
size_t partlen;
int err;
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index b7a21e11dcdf..3341e90dede4 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -167,8 +167,7 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
if (doorbell < MIC_X100_NUM_SBOX_IRQ) {
mic_x100_send_sbox_intr(mdev, doorbell);
} else {
- rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ +
- MIC_X100_RDMASR_IRQ_BASE;
+ rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ;
mic_x100_send_rdmasr_intr(mdev, rdmasr_db);
}
}
diff --git a/drivers/misc/mic/scif/Makefile b/drivers/misc/mic/scif/Makefile
new file mode 100644
index 000000000000..bf10bb7e2b91
--- /dev/null
+++ b/drivers/misc/mic/scif/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile - SCIF driver.
+# Copyright(c) 2014, Intel Corporation.
+#
+obj-$(CONFIG_SCIF) += scif.o
+scif-objs := scif_main.o
+scif-objs += scif_peer_bus.o
+scif-objs += scif_ports.o
+scif-objs += scif_debugfs.o
+scif-objs += scif_fd.o
+scif-objs += scif_api.o
+scif-objs += scif_epd.o
+scif-objs += scif_rb.o
+scif-objs += scif_nodeqp.o
+scif-objs += scif_nm.o
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
new file mode 100644
index 000000000000..f39d3135a9ef
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -0,0 +1,1276 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/scif.h>
+#include "scif_main.h"
+#include "scif_map.h"
+
+static const char * const scif_ep_states[] = {
+ "Unbound",
+ "Bound",
+ "Listening",
+ "Connected",
+ "Connecting",
+ "Mapping",
+ "Closing",
+ "Close Listening",
+ "Disconnected",
+ "Zombie"};
+
+enum conn_async_state {
+ ASYNC_CONN_IDLE = 1, /* ep setup for async connect */
+ ASYNC_CONN_INPROGRESS, /* async connect in progress */
+ ASYNC_CONN_FLUSH_WORK /* async work flush in progress */
+};
+
+scif_epd_t scif_open(void)
+{
+ struct scif_endpt *ep;
+
+ might_sleep();
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ goto err_ep_alloc;
+
+ ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL);
+ if (!ep->qp_info.qp)
+ goto err_qp_alloc;
+
+ spin_lock_init(&ep->lock);
+ mutex_init(&ep->sendlock);
+ mutex_init(&ep->recvlock);
+
+ ep->state = SCIFEP_UNBOUND;
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI open: ep %p success\n", ep);
+ return ep;
+
+err_qp_alloc:
+ kfree(ep);
+err_ep_alloc:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(scif_open);
+
+/*
+ * scif_disconnect_ep - Disconnects the endpoint if found
+ * @epd: The end point returned from scif_open()
+ */
+static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep)
+{
+ struct scifmsg msg;
+ struct scif_endpt *fep = NULL;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+ int err;
+
+ /*
+ * Wake up any threads blocked in send()/recv() before closing
+ * out the connection. Grabbing and releasing the send/recv lock
+ * will ensure that any blocked senders/receivers have exited for
+ * Ring 0 endpoints. It is a Ring 0 bug to call send/recv after
+ * close. Ring 3 endpoints are not affected since close will not
+ * be called while there are IOCTLs executing.
+ */
+ wake_up_interruptible(&ep->sendwq);
+ wake_up_interruptible(&ep->recvwq);
+ mutex_lock(&ep->sendlock);
+ mutex_unlock(&ep->sendlock);
+ mutex_lock(&ep->recvlock);
+ mutex_unlock(&ep->recvlock);
+
+ /* Remove from the connected list */
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep) {
+ list_del(pos);
+ fep = tmpep;
+ spin_lock(&ep->lock);
+ break;
+ }
+ }
+
+ if (!fep) {
+ /*
+ * The other side has completed the disconnect before
+ * the end point can be removed from the list. Therefore
+ * the ep lock is not held; traverse the disconnected
+ * list to find the endpoint, then release the conn lock.
+ */
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep) {
+ list_del(pos);
+ break;
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+ return NULL;
+ }
+
+ init_completion(&ep->discon);
+ msg.uop = SCIF_DISCNCT;
+ msg.src = ep->port;
+ msg.dst = ep->peer;
+ msg.payload[0] = (u64)ep;
+ msg.payload[1] = ep->remote_ep;
+
+ err = scif_nodeqp_send(ep->remote_dev, &msg);
+ spin_unlock(&ep->lock);
+ mutex_unlock(&scif_info.connlock);
+
+ if (!err)
+ /* Wait for the remote node to respond with SCIF_DISCNT_ACK */
+ wait_for_completion_timeout(&ep->discon,
+ SCIF_NODE_ALIVE_TIMEOUT);
+ return ep;
+}
+
+int scif_close(scif_epd_t epd)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+ enum scif_epd_state oldstate;
+ bool flush_conn;
+
+ dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
+ ep, scif_ep_states[ep->state]);
+ might_sleep();
+ spin_lock(&ep->lock);
+ flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS);
+ spin_unlock(&ep->lock);
+
+ if (flush_conn)
+ flush_work(&scif_info.conn_work);
+
+ spin_lock(&ep->lock);
+ oldstate = ep->state;
+
+ ep->state = SCIFEP_CLOSING;
+
+ switch (oldstate) {
+ case SCIFEP_ZOMBIE:
+ case SCIFEP_DISCONNECTED:
+ spin_unlock(&ep->lock);
+ /* Remove from the disconnected list */
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep) {
+ list_del(pos);
+ break;
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+ break;
+ case SCIFEP_UNBOUND:
+ case SCIFEP_BOUND:
+ case SCIFEP_CONNECTING:
+ spin_unlock(&ep->lock);
+ break;
+ case SCIFEP_MAPPING:
+ case SCIFEP_CONNECTED:
+ case SCIFEP_CLOSING:
+ {
+ spin_unlock(&ep->lock);
+ scif_disconnect_ep(ep);
+ break;
+ }
+ case SCIFEP_LISTENING:
+ case SCIFEP_CLLISTEN:
+ {
+ struct scif_conreq *conreq;
+ struct scifmsg msg;
+ struct scif_endpt *aep;
+
+ spin_unlock(&ep->lock);
+ spin_lock(&scif_info.eplock);
+
+ /* remove from listen list */
+ list_for_each_safe(pos, tmpq, &scif_info.listen) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep)
+ list_del(pos);
+ }
+ /* Remove any dangling accepts */
+ while (ep->acceptcnt) {
+ aep = list_first_entry(&ep->li_accept,
+ struct scif_endpt, liacceptlist);
+ list_del(&aep->liacceptlist);
+ scif_put_port(aep->port.port);
+ list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
+ tmpep = list_entry(pos, struct scif_endpt,
+ miacceptlist);
+ if (tmpep == aep) {
+ list_del(pos);
+ break;
+ }
+ }
+ spin_unlock(&scif_info.eplock);
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, list);
+ if (tmpep == aep) {
+ list_del(pos);
+ break;
+ }
+ }
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, list);
+ if (tmpep == aep) {
+ list_del(pos);
+ break;
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+ scif_teardown_ep(aep);
+ spin_lock(&scif_info.eplock);
+ scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
+ ep->acceptcnt--;
+ }
+
+ spin_lock(&ep->lock);
+ spin_unlock(&scif_info.eplock);
+
+ /* Remove and reject any pending connection requests. */
+ while (ep->conreqcnt) {
+ conreq = list_first_entry(&ep->conlist,
+ struct scif_conreq, list);
+ list_del(&conreq->list);
+
+ msg.uop = SCIF_CNCT_REJ;
+ msg.dst.node = conreq->msg.src.node;
+ msg.dst.port = conreq->msg.src.port;
+ msg.payload[0] = conreq->msg.payload[0];
+ msg.payload[1] = conreq->msg.payload[1];
+ /*
+ * No error handling on purpose for scif_nodeqp_send().
+ * If the remote node is lost we still want to free the
+ * connection requests on the self node.
+ */
+ scif_nodeqp_send(&scif_dev[conreq->msg.src.node],
+ &msg);
+ ep->conreqcnt--;
+ kfree(conreq);
+ }
+
+ spin_unlock(&ep->lock);
+ /* If a kSCIF accept is waiting wake it up */
+ wake_up_interruptible(&ep->conwq);
+ break;
+ }
+ }
+ scif_put_port(ep->port.port);
+ scif_teardown_ep(ep);
+ scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scif_close);
+
+/**
+ * __scif_flush() - Wakes up any blocking accepts. The endpoint will no longer
+ * accept new connections.
+ * @epd: The end point returned from scif_open()
+ */
+int __scif_flush(scif_epd_t epd)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+
+ switch (ep->state) {
+ case SCIFEP_LISTENING:
+ {
+ ep->state = SCIFEP_CLLISTEN;
+
+ /* If an accept is waiting wake it up */
+ wake_up_interruptible(&ep->conwq);
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+int scif_bind(scif_epd_t epd, u16 pn)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int ret = 0;
+ int tmp;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI bind: ep %p %s requested port number %d\n",
+ ep, scif_ep_states[ep->state], pn);
+ if (pn) {
+ /*
+ * Similar to IETF RFC 1700, SCIF ports below
+ * SCIF_ADMIN_PORT_END can only be bound by system (or root)
+ * processes or by processes executed by privileged users.
+ */
+ if (pn < SCIF_ADMIN_PORT_END && !capable(CAP_SYS_ADMIN)) {
+ ret = -EACCES;
+ goto scif_bind_admin_exit;
+ }
+ }
+
+ spin_lock(&ep->lock);
+ if (ep->state == SCIFEP_BOUND) {
+ ret = -EINVAL;
+ goto scif_bind_exit;
+ } else if (ep->state != SCIFEP_UNBOUND) {
+ ret = -EISCONN;
+ goto scif_bind_exit;
+ }
+
+ if (pn) {
+ tmp = scif_rsrv_port(pn);
+ if (tmp != pn) {
+ ret = -EINVAL;
+ goto scif_bind_exit;
+ }
+ } else {
+ pn = scif_get_new_port();
+ if (!pn) {
+ ret = -ENOSPC;
+ goto scif_bind_exit;
+ }
+ }
+
+ ep->state = SCIFEP_BOUND;
+ ep->port.node = scif_info.nodeid;
+ ep->port.port = pn;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
+ ret = pn;
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI bind: bound to port number %d\n", pn);
+scif_bind_exit:
+ spin_unlock(&ep->lock);
+scif_bind_admin_exit:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scif_bind);
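
A hedged usage sketch for scif_bind() (kernel client; example_bind is a hypothetical name): port 0 requests a dynamic port, and the bound port number is returned on success, while ports below SCIF_ADMIN_PORT_END additionally require CAP_SYS_ADMIN.

#include <linux/scif.h>

static int example_bind(scif_epd_t *epdp)
{
        scif_epd_t epd = scif_open();
        int port;

        if (!epd)
                return -ENOMEM;
        port = scif_bind(epd, 0);       /* 0 => allocate a dynamic port */
        if (port < 0) {
                scif_close(epd);
                return port;
        }
        *epdp = epd;
        return port;                    /* the port actually bound */
}
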
+
+int scif_listen(scif_epd_t epd, int backlog)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]);
+ spin_lock(&ep->lock);
+ switch (ep->state) {
+ case SCIFEP_ZOMBIE:
+ case SCIFEP_CLOSING:
+ case SCIFEP_CLLISTEN:
+ case SCIFEP_UNBOUND:
+ case SCIFEP_DISCONNECTED:
+ spin_unlock(&ep->lock);
+ return -EINVAL;
+ case SCIFEP_LISTENING:
+ case SCIFEP_CONNECTED:
+ case SCIFEP_CONNECTING:
+ case SCIFEP_MAPPING:
+ spin_unlock(&ep->lock);
+ return -EISCONN;
+ case SCIFEP_BOUND:
+ break;
+ }
+
+ ep->state = SCIFEP_LISTENING;
+ ep->backlog = backlog;
+
+ ep->conreqcnt = 0;
+ ep->acceptcnt = 0;
+ INIT_LIST_HEAD(&ep->conlist);
+ init_waitqueue_head(&ep->conwq);
+ INIT_LIST_HEAD(&ep->li_accept);
+ spin_unlock(&ep->lock);
+
+ /*
+ * Listen setup is complete, so delete the qp information that is not
+ * needed on a listening endpoint before placing it on the list of
+ * listening ep's.
+ */
+ scif_teardown_ep(ep);
+ ep->qp_info.qp = NULL;
+
+ spin_lock(&scif_info.eplock);
+ list_add_tail(&ep->list, &scif_info.listen);
+ spin_unlock(&scif_info.eplock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scif_listen);
+
+/*
+ ************************************************************************
+ * SCIF connection flow:
+ *
+ * 1) A SCIF listening endpoint can call scif_accept(..) to wait for SCIF
+ * connections via a SCIF_CNCT_REQ message
+ * 2) A SCIF endpoint can initiate a SCIF connection by calling
+ * scif_connect(..) which calls scif_setup_qp_connect(..) which
+ * allocates the local qp for the endpoint ring buffer and then sends
+ * a SCIF_CNCT_REQ to the remote node and waits for a SCIF_CNCT_GNT or
+ * a SCIF_CNCT_REJ message
+ * 3) The peer node handles a SCIF_CNCT_REQ via scif_cnctreq(..) which
+ * wakes up any threads blocked in step 1 or sends a SCIF_CNCT_REJ
+ * message otherwise
+ * 4) A thread blocked waiting for incoming connections allocates its local
+ * endpoint QP and ring buffer following which it sends a SCIF_CNCT_GNT
+ * and waits for a SCIF_CNCT_GNT(N)ACK. If the allocation fails then
+ * the node sends a SCIF_CNCT_REJ message
+ * 5) Upon receipt of a SCIF_CNCT_GNT or a SCIF_CNCT_REJ message the
+ * connecting endpoint is woken up as part of handling
+ * scif_cnctgnt(..) following which it maps the remote endpoints'
+ * QP, updates its outbound QP and sends a SCIF_CNCT_GNTACK message on
+ * success or a SCIF_CNCT_GNTNACK message on failure and completes
+ * the scif_connect(..) API
+ * 6) Upon receipt of a SCIF_CNCT_GNT(N)ACK the accepting endpoint blocked
+ * in step 4 is woken up and completes the scif_accept(..) API
+ * 7) The SCIF connection is now established between the two SCIF endpoints.
+ */
+static int scif_conn_func(struct scif_endpt *ep)
+{
+ int err = 0;
+ struct scifmsg msg;
+ struct device *spdev;
+
+ /* Initiate the first part of the endpoint QP setup */
+ err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset,
+ SCIF_ENDPT_QP_SIZE, ep->remote_dev);
+ if (err) {
+ dev_err(&ep->remote_dev->sdev->dev,
+ "%s err %d qp_offset 0x%llx\n",
+ __func__, err, ep->qp_info.qp_offset);
+ ep->state = SCIFEP_BOUND;
+ goto connect_error_simple;
+ }
+
+ spdev = scif_get_peer_dev(ep->remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto cleanup_qp;
+ }
+ /* Format connect message and send it */
+ msg.src = ep->port;
+ msg.dst = ep->conn_port;
+ msg.uop = SCIF_CNCT_REQ;
+ msg.payload[0] = (u64)ep;
+ msg.payload[1] = ep->qp_info.qp_offset;
+ err = _scif_nodeqp_send(ep->remote_dev, &msg);
+ if (err)
+ goto connect_error_dec;
+ scif_put_peer_dev(spdev);
+ /*
+ * Wait for the remote node to respond with SCIF_CNCT_GNT or
+ * SCIF_CNCT_REJ message.
+ */
+ err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING,
+ SCIF_NODE_ALIVE_TIMEOUT);
+ if (!err) {
+ dev_err(&ep->remote_dev->sdev->dev,
+ "%s %d timeout\n", __func__, __LINE__);
+ ep->state = SCIFEP_BOUND;
+ }
+ spdev = scif_get_peer_dev(ep->remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto cleanup_qp;
+ }
+ if (ep->state == SCIFEP_MAPPING) {
+ err = scif_setup_qp_connect_response(ep->remote_dev,
+ ep->qp_info.qp,
+ ep->qp_info.gnt_pld);
+ /*
+ * If the resources to map the queue are not available then
+ * we need to tell the other side to terminate the accept.
+ */
+ if (err) {
+ dev_err(&ep->remote_dev->sdev->dev,
+ "%s %d err %d\n", __func__, __LINE__, err);
+ msg.uop = SCIF_CNCT_GNTNACK;
+ msg.payload[0] = ep->remote_ep;
+ _scif_nodeqp_send(ep->remote_dev, &msg);
+ ep->state = SCIFEP_BOUND;
+ goto connect_error_dec;
+ }
+
+ msg.uop = SCIF_CNCT_GNTACK;
+ msg.payload[0] = ep->remote_ep;
+ err = _scif_nodeqp_send(ep->remote_dev, &msg);
+ if (err) {
+ ep->state = SCIFEP_BOUND;
+ goto connect_error_dec;
+ }
+ ep->state = SCIFEP_CONNECTED;
+ mutex_lock(&scif_info.connlock);
+ list_add_tail(&ep->list, &scif_info.connected);
+ mutex_unlock(&scif_info.connlock);
+ dev_dbg(&ep->remote_dev->sdev->dev,
+ "SCIFAPI connect: ep %p connected\n", ep);
+ } else if (ep->state == SCIFEP_BOUND) {
+ dev_dbg(&ep->remote_dev->sdev->dev,
+ "SCIFAPI connect: ep %p connection refused\n", ep);
+ err = -ECONNREFUSED;
+ goto connect_error_dec;
+ }
+ scif_put_peer_dev(spdev);
+ return err;
+connect_error_dec:
+ scif_put_peer_dev(spdev);
+cleanup_qp:
+ scif_cleanup_ep_qp(ep);
+connect_error_simple:
+ return err;
+}
+
+/*
+ * scif_conn_handler:
+ *
+ * Workqueue handler for servicing non-blocking SCIF connect
+ *
+ */
+void scif_conn_handler(struct work_struct *work)
+{
+ struct scif_endpt *ep;
+
+ do {
+ ep = NULL;
+ spin_lock(&scif_info.nb_connect_lock);
+ if (!list_empty(&scif_info.nb_connect_list)) {
+ ep = list_first_entry(&scif_info.nb_connect_list,
+ struct scif_endpt, conn_list);
+ list_del(&ep->conn_list);
+ }
+ spin_unlock(&scif_info.nb_connect_lock);
+ if (ep)
+ ep->conn_err = scif_conn_func(ep);
+ } while (ep);
+}
+
+int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int err = 0;
+ struct scif_dev *remote_dev;
+ struct device *spdev;
+
+ dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep,
+ scif_ep_states[ep->state]);
+
+ if (!scif_dev || dst->node > scif_info.maxid)
+ return -ENODEV;
+
+ might_sleep();
+
+ remote_dev = &scif_dev[dst->node];
+ spdev = scif_get_peer_dev(remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ return err;
+ }
+
+ spin_lock(&ep->lock);
+ switch (ep->state) {
+ case SCIFEP_ZOMBIE:
+ case SCIFEP_CLOSING:
+ err = -EINVAL;
+ break;
+ case SCIFEP_DISCONNECTED:
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
+ ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
+ else
+ err = -EINVAL;
+ break;
+ case SCIFEP_LISTENING:
+ case SCIFEP_CLLISTEN:
+ err = -EOPNOTSUPP;
+ break;
+ case SCIFEP_CONNECTING:
+ case SCIFEP_MAPPING:
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
+ err = -EINPROGRESS;
+ else
+ err = -EISCONN;
+ break;
+ case SCIFEP_CONNECTED:
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
+ ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
+ else
+ err = -EISCONN;
+ break;
+ case SCIFEP_UNBOUND:
+ ep->port.port = scif_get_new_port();
+ if (!ep->port.port) {
+ err = -ENOSPC;
+ } else {
+ ep->port.node = scif_info.nodeid;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
+ }
+ /* Fall through */
+ case SCIFEP_BOUND:
+ /*
+ * If a non-blocking connect has already been initiated
+ * (conn_async_state is either ASYNC_CONN_INPROGRESS or
+ * ASYNC_CONN_FLUSH_WORK), the end point could end up in
+ * SCIFEP_BOUND due to an error in the connection process
+ * (e.g., connection refused). If conn_async_state is
+ * ASYNC_CONN_INPROGRESS, transition to ASYNC_CONN_FLUSH_WORK
+ * so that the error status can be collected. If the state is
+ * already ASYNC_CONN_FLUSH_WORK, then set the error to
+ * EINPROGRESS since some other thread is waiting to collect
+ * the error status.
+ */
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
+ ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
+ } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
+ err = -EINPROGRESS;
+ } else {
+ ep->conn_port = *dst;
+ init_waitqueue_head(&ep->sendwq);
+ init_waitqueue_head(&ep->recvwq);
+ init_waitqueue_head(&ep->conwq);
+ ep->conn_async_state = 0;
+
+ if (unlikely(non_block))
+ ep->conn_async_state = ASYNC_CONN_INPROGRESS;
+ }
+ break;
+ }
+
+ if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK)
+ goto connect_simple_unlock1;
+
+ ep->state = SCIFEP_CONNECTING;
+ ep->remote_dev = &scif_dev[dst->node];
+ ep->qp_info.qp->magic = SCIFEP_MAGIC;
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
+ spin_lock(&scif_info.nb_connect_lock);
+ list_add_tail(&ep->conn_list, &scif_info.nb_connect_list);
+ spin_unlock(&scif_info.nb_connect_lock);
+ err = -EINPROGRESS;
+ schedule_work(&scif_info.conn_work);
+ }
+connect_simple_unlock1:
+ spin_unlock(&ep->lock);
+ scif_put_peer_dev(spdev);
+ if (err) {
+ return err;
+ } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
+ flush_work(&scif_info.conn_work);
+ err = ep->conn_err;
+ spin_lock(&ep->lock);
+ ep->conn_async_state = ASYNC_CONN_IDLE;
+ spin_unlock(&ep->lock);
+ } else {
+ err = scif_conn_func(ep);
+ }
+ return err;
+}
+
+int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
+{
+ return __scif_connect(epd, dst, false);
+}
+EXPORT_SYMBOL_GPL(scif_connect);
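
A hedged client-side sketch of the connection flow documented above (example_client is illustrative, not part of this patch); the blocking scif_connect() returns only after the peer grants or rejects the request:

#include <linux/scif.h>

static int example_client(u16 node, u16 port, scif_epd_t *epdp)
{
        struct scif_port_id dst = { .node = node, .port = port };
        scif_epd_t epd = scif_open();
        int err;

        if (!epd)
                return -ENOMEM;
        err = scif_connect(epd, &dst);  /* waits for GNT or REJ */
        if (err < 0) {
                scif_close(epd);
                return err;
        }
        *epdp = epd;
        return 0;
}
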
+
+/**
+ * scif_accept() - Accept a connection request from the remote node
+ *
+ * The function accepts a connection request from the remote node. Successful
+ * completion is indicated by a new end point being created and passed back
+ * to the caller for future reference.
+ *
+ * Upon successful completion zero will be returned and the peer information
+ * will be filled in.
+ *
+ * If the end point is not in the listening state -EINVAL will be returned.
+ *
+ * If resource allocation fails during the connection sequence, -ENOMEM
+ * will be returned.
+ *
+ * If the function is called without the SCIF_ACCEPT_SYNC flag set and no
+ * connection requests are pending it will return -EAGAIN.
+ *
+ * If the remote side is not sending any connection requests the caller may
+ * terminate this function with a signal. If so -EINTR will be returned.
+ */
+int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
+ scif_epd_t *newepd, int flags)
+{
+ struct scif_endpt *lep = (struct scif_endpt *)epd;
+ struct scif_endpt *cep;
+ struct scif_conreq *conreq;
+ struct scifmsg msg;
+ int err;
+ struct device *spdev;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI accept: ep %p %s\n", lep, scif_ep_states[lep->state]);
+
+ if (flags & ~SCIF_ACCEPT_SYNC)
+ return -EINVAL;
+
+ if (!peer || !newepd)
+ return -EINVAL;
+
+ might_sleep();
+ spin_lock(&lep->lock);
+ if (lep->state != SCIFEP_LISTENING) {
+ spin_unlock(&lep->lock);
+ return -EINVAL;
+ }
+
+ if (!lep->conreqcnt && !(flags & SCIF_ACCEPT_SYNC)) {
+ /* No connection request present and we do not want to wait */
+ spin_unlock(&lep->lock);
+ return -EAGAIN;
+ }
+
+ lep->files = current->files;
+retry_connection:
+ spin_unlock(&lep->lock);
+ /* Wait for the remote node to send us a SCIF_CNCT_REQ */
+ err = wait_event_interruptible(lep->conwq,
+ (lep->conreqcnt ||
+ (lep->state != SCIFEP_LISTENING)));
+ if (err)
+ return err;
+
+ if (lep->state != SCIFEP_LISTENING)
+ return -EINTR;
+
+ spin_lock(&lep->lock);
+
+ if (!lep->conreqcnt)
+ goto retry_connection;
+
+ /* Get the first connect request off the list */
+ conreq = list_first_entry(&lep->conlist, struct scif_conreq, list);
+ list_del(&conreq->list);
+ lep->conreqcnt--;
+ spin_unlock(&lep->lock);
+
+ /* Fill in the peer information */
+ peer->node = conreq->msg.src.node;
+ peer->port = conreq->msg.src.port;
+
+ cep = kzalloc(sizeof(*cep), GFP_KERNEL);
+ if (!cep) {
+ err = -ENOMEM;
+ goto scif_accept_error_epalloc;
+ }
+ spin_lock_init(&cep->lock);
+ mutex_init(&cep->sendlock);
+ mutex_init(&cep->recvlock);
+ cep->state = SCIFEP_CONNECTING;
+ cep->remote_dev = &scif_dev[peer->node];
+ cep->remote_ep = conreq->msg.payload[0];
+
+ cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL);
+ if (!cep->qp_info.qp) {
+ err = -ENOMEM;
+ goto scif_accept_error_qpalloc;
+ }
+
+ cep->qp_info.qp->magic = SCIFEP_MAGIC;
+ spdev = scif_get_peer_dev(cep->remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto scif_accept_error_map;
+ }
+ err = scif_setup_qp_accept(cep->qp_info.qp, &cep->qp_info.qp_offset,
+ conreq->msg.payload[1], SCIF_ENDPT_QP_SIZE,
+ cep->remote_dev);
+ if (err) {
+ dev_dbg(&cep->remote_dev->sdev->dev,
+ "SCIFAPI accept: ep %p new %p scif_setup_qp_accept %d qp_offset 0x%llx\n",
+ lep, cep, err, cep->qp_info.qp_offset);
+ scif_put_peer_dev(spdev);
+ goto scif_accept_error_map;
+ }
+
+ cep->port.node = lep->port.node;
+ cep->port.port = lep->port.port;
+ cep->peer.node = peer->node;
+ cep->peer.port = peer->port;
+ init_waitqueue_head(&cep->sendwq);
+ init_waitqueue_head(&cep->recvwq);
+ init_waitqueue_head(&cep->conwq);
+
+ msg.uop = SCIF_CNCT_GNT;
+ msg.src = cep->port;
+ msg.payload[0] = cep->remote_ep;
+ msg.payload[1] = cep->qp_info.qp_offset;
+ msg.payload[2] = (u64)cep;
+
+ err = _scif_nodeqp_send(cep->remote_dev, &msg);
+ scif_put_peer_dev(spdev);
+ if (err)
+ goto scif_accept_error_map;
+retry:
+ /* Wait for the remote node to respond with SCIF_CNCT_GNT(N)ACK */
+ err = wait_event_timeout(cep->conwq, cep->state != SCIFEP_CONNECTING,
+ SCIF_NODE_ACCEPT_TIMEOUT);
+ if (!err && scifdev_alive(cep))
+ goto retry;
+ err = !err ? -ENODEV : 0;
+ if (err)
+ goto scif_accept_error_map;
+ kfree(conreq);
+
+ spin_lock(&cep->lock);
+
+ if (cep->state == SCIFEP_CLOSING) {
+ /*
+ * Remote failed to allocate resources and NAKed the grant.
+ * There is at this point nothing referencing the new end point.
+ */
+ spin_unlock(&cep->lock);
+ scif_teardown_ep(cep);
+ kfree(cep);
+
+ /* If call with sync flag then go back and wait. */
+ if (flags & SCIF_ACCEPT_SYNC) {
+ spin_lock(&lep->lock);
+ goto retry_connection;
+ }
+ return -EAGAIN;
+ }
+
+ scif_get_port(cep->port.port);
+ *newepd = (scif_epd_t)cep;
+ spin_unlock(&cep->lock);
+ return 0;
+scif_accept_error_map:
+ scif_teardown_ep(cep);
+scif_accept_error_qpalloc:
+ kfree(cep);
+scif_accept_error_epalloc:
+ msg.uop = SCIF_CNCT_REJ;
+ msg.dst.node = conreq->msg.src.node;
+ msg.dst.port = conreq->msg.src.port;
+ msg.payload[0] = conreq->msg.payload[0];
+ msg.payload[1] = conreq->msg.payload[1];
+ scif_nodeqp_send(&scif_dev[conreq->msg.src.node], &msg);
+ kfree(conreq);
+ return err;
+}
+EXPORT_SYMBOL_GPL(scif_accept);
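
The matching hedged server-side sketch (example_server is illustrative, not part of this patch); SCIF_ACCEPT_SYNC makes the call wait for an incoming SCIF_CNCT_REQ, and on success the listening endpoint stays open for further accepts:

#include <linux/scif.h>

static int example_server(u16 port, scif_epd_t *lepp, scif_epd_t *cepp)
{
        struct scif_port_id peer;
        scif_epd_t lep = scif_open();
        int err;

        if (!lep)
                return -ENOMEM;
        err = scif_bind(lep, port);     /* returns the port on success */
        if (err < 0)
                goto close;
        err = scif_listen(lep, 16);     /* backlog of 16 */
        if (err)
                goto close;
        err = scif_accept(lep, &peer, cepp, SCIF_ACCEPT_SYNC);
        if (err)
                goto close;
        *lepp = lep;                    /* keep listening for more */
        return 0;
close:
        scif_close(lep);
        return err;
}
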
+
+/*
+ * scif_msg_param_check:
+ * @epd: The end point returned from scif_open()
+ * @len: Length of the message
+ * @flags: blocking or non blocking
+ *
+ * Validate parameters for messaging APIs scif_send(..)/scif_recv(..).
+ */
+static inline int scif_msg_param_check(scif_epd_t epd, int len, int flags)
+{
+ int ret = -EINVAL;
+
+ if (len < 0)
+ goto err_ret;
+ if (flags && (!(flags & SCIF_RECV_BLOCK)))
+ goto err_ret;
+ ret = 0;
+err_ret:
+ return ret;
+}
+
+static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ struct scifmsg notif_msg;
+ int curr_xfer_len = 0, sent_len = 0, write_count;
+ int ret = 0;
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (flags & SCIF_SEND_BLOCK)
+ might_sleep();
+
+ spin_lock(&ep->lock);
+ while (sent_len != len && SCIFEP_CONNECTED == ep->state) {
+ write_count = scif_rb_space(&qp->outbound_q);
+ if (write_count) {
+ /* Best effort to send as much data as possible */
+ curr_xfer_len = min(len - sent_len, write_count);
+ ret = scif_rb_write(&qp->outbound_q, msg,
+ curr_xfer_len);
+ if (ret < 0)
+ break;
+ /* Success. Update write pointer */
+ scif_rb_commit(&qp->outbound_q);
+ /*
+ * Send a notification to the peer about the
+ * produced data message.
+ */
+ notif_msg.src = ep->port;
+ notif_msg.uop = SCIF_CLIENT_SENT;
+ notif_msg.payload[0] = ep->remote_ep;
+ ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg);
+ if (ret)
+ break;
+ sent_len += curr_xfer_len;
+ msg = msg + curr_xfer_len;
+ continue;
+ }
+ curr_xfer_len = min(len - sent_len, SCIF_ENDPT_QP_SIZE - 1);
+ /* Not enough RB space. Return in the non-blocking case */
+ if (!(flags & SCIF_SEND_BLOCK))
+ break;
+
+ spin_unlock(&ep->lock);
+ /* Wait for a SCIF_CLIENT_RCVD message in the Blocking case */
+ ret =
+ wait_event_interruptible(ep->sendwq,
+ (SCIFEP_CONNECTED != ep->state) ||
+ (scif_rb_space(&qp->outbound_q) >=
+ curr_xfer_len));
+ spin_lock(&ep->lock);
+ if (ret)
+ break;
+ }
+ if (sent_len)
+ ret = sent_len;
+ else if (!ret && SCIFEP_CONNECTED != ep->state)
+ ret = SCIFEP_DISCONNECTED == ep->state ?
+ -ECONNRESET : -ENOTCONN;
+ spin_unlock(&ep->lock);
+ return ret;
+}
+
+static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
+{
+ int read_size;
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ struct scifmsg notif_msg;
+ int curr_recv_len = 0, remaining_len = len, read_count;
+ int ret = 0;
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (flags & SCIF_RECV_BLOCK)
+ might_sleep();
+ spin_lock(&ep->lock);
+ while (remaining_len && (SCIFEP_CONNECTED == ep->state ||
+ SCIFEP_DISCONNECTED == ep->state)) {
+ read_count = scif_rb_count(&qp->inbound_q, remaining_len);
+ if (read_count) {
+ /*
+ * Best effort to recv as much data as there
+ * are bytes to read in the RB; this is
+ * particularly important for the non-blocking case.
+ */
+ curr_recv_len = min(remaining_len, read_count);
+ read_size = scif_rb_get_next(&qp->inbound_q,
+ msg, curr_recv_len);
+ if (ep->state == SCIFEP_CONNECTED) {
+ /*
+ * Update the read pointer only if the endpoint
+ * is still connected else the read pointer
+ * might no longer exist since the peer has
+ * freed resources!
+ */
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ /*
+ * Send a notification to the peer about the
+ * consumed data message only if the EP is in
+ * SCIFEP_CONNECTED state.
+ */
+ notif_msg.src = ep->port;
+ notif_msg.uop = SCIF_CLIENT_RCVD;
+ notif_msg.payload[0] = ep->remote_ep;
+ ret = _scif_nodeqp_send(ep->remote_dev,
+ &notif_msg);
+ if (ret)
+ break;
+ }
+ remaining_len -= curr_recv_len;
+ msg = msg + curr_recv_len;
+ continue;
+ }
+ /*
+ * Bail out now if the EP is in SCIFEP_DISCONNECTED state else
+ * we will keep looping forever.
+ */
+ if (ep->state == SCIFEP_DISCONNECTED)
+ break;
+ /*
+ * Return in the Non Blocking case if there is no data
+ * to read in this iteration.
+ */
+ if (!(flags & SCIF_RECV_BLOCK))
+ break;
+ curr_recv_len = min(remaining_len, SCIF_ENDPT_QP_SIZE - 1);
+ spin_unlock(&ep->lock);
+ /*
+ * Wait for a SCIF_CLIENT_SEND message in the blocking case
+ * or until other side disconnects.
+ */
+ ret =
+ wait_event_interruptible(ep->recvwq,
+ SCIFEP_CONNECTED != ep->state ||
+ scif_rb_count(&qp->inbound_q,
+ curr_recv_len)
+ >= curr_recv_len);
+ spin_lock(&ep->lock);
+ if (ret)
+ break;
+ }
+ if (len - remaining_len)
+ ret = len - remaining_len;
+ else if (!ret && ep->state != SCIFEP_CONNECTED)
+ ret = ep->state == SCIFEP_DISCONNECTED ?
+ -ECONNRESET : -ENOTCONN;
+ spin_unlock(&ep->lock);
+ return ret;
+}
+
+/**
+ * scif_user_send() - Send data to connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address of data to send
+ * @len: Length to send
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the driver IOCTL entry point
+ * only and is a wrapper for _scif_send().
+ */
+int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int err = 0;
+ int sent_len = 0;
+ char *tmp;
+ int loop_len;
+ int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ err = scif_msg_param_check(epd, len, flags);
+ if (err)
+ goto send_err;
+
+ tmp = kmalloc(chunk_len, GFP_KERNEL);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto send_err;
+ }
+ /*
+ * Grabbing the lock before breaking up the transfer into
+ * multiple chunks is required to ensure that messages do
+ * not get fragmented and reordered.
+ */
+ mutex_lock(&ep->sendlock);
+ while (sent_len != len) {
+ loop_len = len - sent_len;
+ loop_len = min(chunk_len, loop_len);
+ if (copy_from_user(tmp, msg, loop_len)) {
+ err = -EFAULT;
+ goto send_free_err;
+ }
+ err = _scif_send(epd, tmp, loop_len, flags);
+ if (err < 0)
+ goto send_free_err;
+ sent_len += err;
+ msg += err;
+ if (err != loop_len)
+ goto send_free_err;
+ }
+send_free_err:
+ mutex_unlock(&ep->sendlock);
+ kfree(tmp);
+send_err:
+ return err < 0 ? err : sent_len;
+}
+
+/**
+ * scif_user_recv() - Receive data from connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address to place data
+ * @len: Length to receive
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the driver IOCTL entry point
+ * only and is a wrapper for _scif_recv().
+ */
+int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int err = 0;
+ int recv_len = 0;
+ char *tmp;
+ int loop_len;
+ int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ err = scif_msg_param_check(epd, len, flags);
+ if (err)
+ goto recv_err;
+
+ tmp = kmalloc(chunk_len, GFP_KERNEL);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto recv_err;
+ }
+ /*
+ * Grabbing the lock before breaking up the transfer into
+ * multiple chunks is required to ensure that messages do
+ * not get fragmented and reordered.
+ */
+ mutex_lock(&ep->recvlock);
+ while (recv_len != len) {
+ loop_len = len - recv_len;
+ loop_len = min(chunk_len, loop_len);
+ err = _scif_recv(epd, tmp, loop_len, flags);
+ if (err < 0)
+ goto recv_free_err;
+ if (copy_to_user(msg, tmp, err)) {
+ err = -EFAULT;
+ goto recv_free_err;
+ }
+ recv_len += err;
+ msg += err;
+ if (err != loop_len)
+ goto recv_free_err;
+ }
+recv_free_err:
+ mutex_unlock(&ep->recvlock);
+ kfree(tmp);
+recv_err:
+ return err < 0 ? err : recv_len;
+}
+
+/**
+ * scif_send() - Send data to connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address of data to send
+ * @len: Length to send
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the kernel mode only and is
+ * a wrapper for _scif_send().
+ */
+int scif_send(scif_epd_t epd, void *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int ret;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ ret = scif_msg_param_check(epd, len, flags);
+ if (ret)
+ return ret;
+ if (!ep->remote_dev)
+ return -ENOTCONN;
+ /*
+ * Grab the mutex lock in the blocking case only
+ * to ensure messages do not get fragmented/reordered.
+ * The non blocking mode is protected using spin locks
+ * in _scif_send().
+ */
+ if (flags & SCIF_SEND_BLOCK)
+ mutex_lock(&ep->sendlock);
+
+ ret = _scif_send(epd, msg, len, flags);
+
+ if (flags & SCIF_SEND_BLOCK)
+ mutex_unlock(&ep->sendlock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scif_send);
+
+/**
+ * scif_recv() - Receive data from connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address to place data
+ * @len: Length to receive
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the kernel mode only and is
+ * a wrapper for _scif_recv().
+ */
+int scif_recv(scif_epd_t epd, void *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int ret;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ ret = scif_msg_param_check(epd, len, flags);
+ if (ret)
+ return ret;
+ /*
+ * Grab the mutex lock in the blocking case only
+ * to ensure messages do not get fragmented/reordered.
+ * The non blocking mode is protected using spin locks
+ * in _scif_recv().
+ */
+ if (flags & SCIF_RECV_BLOCK)
+ mutex_lock(&ep->recvlock);
+
+ ret = _scif_recv(epd, msg, len, flags);
+
+ if (flags & SCIF_RECV_BLOCK)
+ mutex_unlock(&ep->recvlock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scif_recv);
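
A short hedged sketch of the kernel-mode message path (example_echo is illustrative, not part of this patch); with the BLOCK flags both calls wait until the full length is transferred, the peer disconnects, or a signal arrives:

#include <linux/scif.h>

static int example_echo(scif_epd_t epd)
{
        char buf[64] = "ping";
        int ret;

        ret = scif_send(epd, buf, sizeof(buf), SCIF_SEND_BLOCK);
        if (ret < 0)
                return ret;
        ret = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
        return ret < 0 ? ret : 0;
}
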
+
+int scif_get_node_ids(u16 *nodes, int len, u16 *self)
+{
+ int online = 0;
+ int offset = 0;
+ int node;
+
+ if (!scif_is_mgmt_node())
+ scif_get_node_info();
+
+ *self = scif_info.nodeid;
+ mutex_lock(&scif_info.conflock);
+ len = min_t(int, len, scif_info.total);
+ for (node = 0; node <= scif_info.maxid; node++) {
+ if (_scifdev_alive(&scif_dev[node])) {
+ online++;
+ if (offset < len)
+ nodes[offset++] = node;
+ }
+ }
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI get_node_ids total %d online %d filled in %d nodes\n",
+ scif_info.total, online, offset);
+ mutex_unlock(&scif_info.conflock);
+
+ return online;
+}
+EXPORT_SYMBOL_GPL(scif_get_node_ids);
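
A hedged enumeration sketch (example_nodes is illustrative, not part of this patch); note the return value counts all online nodes and can exceed the number of array slots filled in:

#include <linux/kernel.h>
#include <linux/scif.h>

static void example_nodes(void)
{
        u16 nodes[8], self;
        int online, filled, i;

        online = scif_get_node_ids(nodes, ARRAY_SIZE(nodes), &self);
        filled = min_t(int, online, ARRAY_SIZE(nodes));
        for (i = 0; i < filled; i++)
                pr_info("SCIF node %u online (self %u)\n",
                        nodes[i], self);
}
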
diff --git a/drivers/misc/mic/scif/scif_debugfs.c b/drivers/misc/mic/scif/scif_debugfs.c
new file mode 100644
index 000000000000..51f14e2a1196
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_debugfs.c
@@ -0,0 +1,85 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "../common/mic_dev.h"
+#include "scif_main.h"
+
+/* Debugfs parent dir */
+static struct dentry *scif_dbg;
+
+static int scif_dev_test(struct seq_file *s, void *unused)
+{
+ int node;
+
+ seq_printf(s, "Total Nodes %d Self Node Id %d Maxid %d\n",
+ scif_info.total, scif_info.nodeid,
+ scif_info.maxid);
+
+ if (!scif_dev)
+ return 0;
+
+ seq_printf(s, "%-16s\t%-16s\n", "node_id", "state");
+
+ for (node = 0; node <= scif_info.maxid; node++)
+ seq_printf(s, "%-16d\t%-16s\n", scif_dev[node].node,
+ _scifdev_alive(&scif_dev[node]) ?
+ "Running" : "Offline");
+ return 0;
+}
+
+static int scif_dev_test_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, scif_dev_test, inode->i_private);
+}
+
+static int scif_dev_test_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations scif_dev_ops = {
+ .owner = THIS_MODULE,
+ .open = scif_dev_test_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = scif_dev_test_release
+};
+
+void __init scif_init_debugfs(void)
+{
+ struct dentry *d;
+
+ scif_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!scif_dbg) {
+ dev_err(scif_info.mdev.this_device,
+ "can't create debugfs dir scif\n");
+ return;
+ }
+
+ d = debugfs_create_file("scif_dev", 0444, scif_dbg,
+ NULL, &scif_dev_ops);
+ debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log);
+ debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable);
+}
+
+void scif_exit_debugfs(void)
+{
+ debugfs_remove_recursive(scif_dbg);
+}
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
new file mode 100644
index 000000000000..b4bfbb08a8e3
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_epd.c
@@ -0,0 +1,353 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "scif_main.h"
+#include "scif_map.h"
+
+void scif_cleanup_ep_qp(struct scif_endpt *ep)
+{
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (qp->outbound_q.rb_base) {
+ scif_iounmap((void *)qp->outbound_q.rb_base,
+ qp->outbound_q.size, ep->remote_dev);
+ qp->outbound_q.rb_base = NULL;
+ }
+ if (qp->remote_qp) {
+ scif_iounmap((void *)qp->remote_qp,
+ sizeof(struct scif_qp), ep->remote_dev);
+ qp->remote_qp = NULL;
+ }
+ if (qp->local_qp) {
+ scif_unmap_single(qp->local_qp, ep->remote_dev,
+ sizeof(struct scif_qp));
+ qp->local_qp = 0x0;
+ }
+ if (qp->local_buf) {
+ scif_unmap_single(qp->local_buf, ep->remote_dev,
+ SCIF_ENDPT_QP_SIZE);
+ qp->local_buf = 0;
+ }
+}
+
+void scif_teardown_ep(void *endpt)
+{
+ struct scif_endpt *ep = endpt;
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (qp) {
+ spin_lock(&ep->lock);
+ scif_cleanup_ep_qp(ep);
+ spin_unlock(&ep->lock);
+ kfree(qp->inbound_q.rb_base);
+ kfree(qp);
+ }
+}
+
+/*
+ * Enqueue the endpoint to the zombie list for cleanup.
+ * The endpoint should not be accessed once this API returns.
+ */
+void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
+{
+ if (!eplock_held)
+ spin_lock(&scif_info.eplock);
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_ZOMBIE;
+ spin_unlock(&ep->lock);
+ list_add_tail(&ep->list, &scif_info.zombie);
+ scif_info.nr_zombies++;
+ if (!eplock_held)
+ spin_unlock(&scif_info.eplock);
+ schedule_work(&scif_info.misc_work);
+}
+
+static struct scif_endpt *scif_find_listen_ep(u16 port)
+{
+ struct scif_endpt *ep = NULL;
+ struct list_head *pos, *tmpq;
+
+ spin_lock(&scif_info.eplock);
+ list_for_each_safe(pos, tmpq, &scif_info.listen) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ if (ep->port.port == port) {
+ spin_lock(&ep->lock);
+ spin_unlock(&scif_info.eplock);
+ return ep;
+ }
+ }
+ spin_unlock(&scif_info.eplock);
+ return NULL;
+}
+
+void scif_cleanup_zombie_epd(void)
+{
+ struct list_head *pos, *tmpq;
+ struct scif_endpt *ep;
+
+ spin_lock(&scif_info.eplock);
+ list_for_each_safe(pos, tmpq, &scif_info.zombie) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ list_del(pos);
+ scif_info.nr_zombies--;
+ kfree(ep);
+ }
+ spin_unlock(&scif_info.eplock);
+}
+
+/**
+ * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
+ * @msg: Interrupt message
+ *
+ * This message is initiated by the remote node to request a connection
+ * to the local node. This function looks for an end point in the
+ * listen state on the requested port id.
+ *
+ * If it finds a listening port it places the connect request on the
+ * listening end points queue and wakes up any pending accept calls.
+ *
+ * If it does not find a listening end point it sends a connection
+ * reject message to the remote node.
+ */
+void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = NULL;
+ struct scif_conreq *conreq;
+
+ conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
+ if (!conreq)
+ /* Lack of resources so reject the request. */
+ goto conreq_sendrej;
+
+ ep = scif_find_listen_ep(msg->dst.port);
+ if (!ep)
+ /* Send reject due to no listening ports */
+ goto conreq_sendrej_free;
+
+ if (ep->backlog <= ep->conreqcnt) {
+ /* Send reject due to too many pending requests */
+ spin_unlock(&ep->lock);
+ goto conreq_sendrej_free;
+ }
+
+ conreq->msg = *msg;
+ list_add_tail(&conreq->list, &ep->conlist);
+ ep->conreqcnt++;
+ wake_up_interruptible(&ep->conwq);
+ spin_unlock(&ep->lock);
+ return;
+
+conreq_sendrej_free:
+ kfree(conreq);
+conreq_sendrej:
+ msg->uop = SCIF_CNCT_REJ;
+ scif_nodeqp_send(&scif_dev[msg->src.node], msg);
+}
+
+/**
+ * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
+ * @msg: Interrupt message
+ *
+ * An accept() on the remote node has occurred and sent this message
+ * to indicate success. Place the end point in the MAPPING state and
+ * save the remote nodes memory information. Then wake up the connect
+ * request so it can finish.
+ */
+void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTING == ep->state) {
+ ep->peer.node = msg->src.node;
+ ep->peer.port = msg->src.port;
+ ep->qp_info.gnt_pld = msg->payload[1];
+ ep->remote_ep = msg->payload[2];
+ ep->state = SCIFEP_MAPPING;
+
+ wake_up(&ep->conwq);
+ }
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote connection request has finished mapping the local memory.
+ * Place the connection in the connected state and wake up the pending
+ * accept() call.
+ */
+void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ mutex_lock(&scif_info.connlock);
+ spin_lock(&ep->lock);
+ /* New ep is now connected with all resources set. */
+ ep->state = SCIFEP_CONNECTED;
+ list_add_tail(&ep->list, &scif_info.connected);
+ wake_up(&ep->conwq);
+ spin_unlock(&ep->lock);
+ mutex_unlock(&scif_info.connlock);
+}
+
+/**
+ * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote connection request failed to map the local memory it was sent.
+ * Place the end point in the CLOSING state to indicate this and wake up
+ * the pending accept().
+ */
+void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_CLOSING;
+ wake_up(&ep->conwq);
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote end has rejected the connection request. Set the end
+ * point back to the bound state and wake up the pending connect().
+ */
+void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTING == ep->state) {
+ ep->state = SCIFEP_BOUND;
+ wake_up(&ep->conwq);
+ }
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
+ * @scifdev: Remote SCIF device
+ * @msg: Interrupt message
+ *
+ * The remote node has indicated that close() has been called on its
+ * endpoint. Remove the local endpoint from the connected list, set its
+ * state to disconnected and ensure that accesses to the remote node are
+ * shut down.
+ *
+ * Once all accesses to the remote end have completed, send a
+ * SCIF_DISCNT_ACK so the peer can remove its resources and complete
+ * the close routine.
+ */
+void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = NULL;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+		/*
+		 * The local ep may have sent a disconnect and been closed
+		 * due to a message response timeout. It may have been
+		 * allocated again and formed a new connection, so also
+		 * check that the remote ep matches.
+		 */
+ if (((u64)tmpep == msg->payload[1]) &&
+ ((u64)tmpep->remote_ep == msg->payload[0])) {
+ list_del(pos);
+ ep = tmpep;
+ spin_lock(&ep->lock);
+ break;
+ }
+ }
+
+ /*
+ * If the terminated end is not found then this side started closing
+ * before the other side sent the disconnect. If so the ep will no
+ * longer be on the connected list. Regardless the other side
+ * needs to be acked to let it know close is complete.
+ */
+ if (!ep) {
+ mutex_unlock(&scif_info.connlock);
+ goto discnct_ack;
+ }
+
+ ep->state = SCIFEP_DISCONNECTED;
+ list_add_tail(&ep->list, &scif_info.disconnected);
+
+ wake_up_interruptible(&ep->sendwq);
+ wake_up_interruptible(&ep->recvwq);
+ spin_unlock(&ep->lock);
+ mutex_unlock(&scif_info.connlock);
+
+discnct_ack:
+ msg->uop = SCIF_DISCNT_ACK;
+ scif_nodeqp_send(&scif_dev[msg->src.node], msg);
+}
+
+/**
+ * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
+ * @scifdev: Remote SCIF device
+ * @msg: Interrupt message
+ *
+ * The remote side has indicated it holds no more references to local
+ * resources.
+ */
+void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_DISCONNECTED;
+ spin_unlock(&ep->lock);
+ complete(&ep->discon);
+}
+
+/**
+ * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
+ * @scifdev: Remote SCIF device
+ * @msg: Interrupt message
+ *
+ * The remote side has written new data into the connection; wake up any
+ * thread blocked in recv() on this endpoint.
+ */
+void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTED == ep->state)
+ wake_up_interruptible(&ep->recvwq);
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
+ * @scifdev: Remote SCIF device
+ * @msg: Interrupt message
+ *
+ * The remote side has consumed data from the connection; wake up any
+ * thread blocked in send() on this endpoint.
+ */
+void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTED == ep->state)
+ wake_up_interruptible(&ep->sendwq);
+ spin_unlock(&ep->lock);
+}
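The handlers above drive the endpoint state machine for connection setup and
teardown. An informal summary of the message flow they implement (derived only
from the code in this file; the states are those of enum scif_epd_state in
scif_epd.h):

    connecting node                         listening node
    ---------------                         --------------
    SCIF_CNCT_REQ      ----------------->   queue request, wake accept()
    back to BOUND      <--SCIF_CNCT_REJ--   no listener or backlog full
    MAPPING            <--SCIF_CNCT_GNT--   accept() granted the request
    SCIF_CNCT_GNTACK   ----------------->   CONNECTED, wake accept()
    SCIF_CNCT_GNTNACK  ----------------->   CLOSING, wake accept()

    either end         ---SCIF_DISCNCT-->   peer goes DISCONNECTED
    close() completes  <-SCIF_DISCNT_ACK-   no more remote references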
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
new file mode 100644
index 000000000000..331322a25213
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -0,0 +1,160 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_EPD_H
+#define SCIF_EPD_H
+
+#include <linux/delay.h>
+#include <linux/scif.h>
+#include <linux/scif_ioctl.h>
+
+#define SCIF_EPLOCK_HELD true
+
+enum scif_epd_state {
+ SCIFEP_UNBOUND,
+ SCIFEP_BOUND,
+ SCIFEP_LISTENING,
+ SCIFEP_CONNECTED,
+ SCIFEP_CONNECTING,
+ SCIFEP_MAPPING,
+ SCIFEP_CLOSING,
+ SCIFEP_CLLISTEN,
+ SCIFEP_DISCONNECTED,
+ SCIFEP_ZOMBIE
+};
+
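+/*
+ * Informal endpoint lifecycle (see the handlers in scif_epd.c): bind()
+ * moves an endpoint from UNBOUND to BOUND; connect() takes it through
+ * CONNECTING and MAPPING to CONNECTED, or back to BOUND on rejection;
+ * close() and failures lead to CLOSING/DISCONNECTED, and ZOMBIE
+ * endpoints await final cleanup.
+ */
+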
+/*
+ * struct scif_conreq - Data structure added to the connection list.
+ *
+ * @msg: connection request message received
+ * @list: link to list of connection requests
+ */
+struct scif_conreq {
+ struct scifmsg msg;
+ struct list_head list;
+};
+
+/* Size of the RB for the Endpoint QP */
+#define SCIF_ENDPT_QP_SIZE 0x1000
+
+/*
+ * struct scif_endpt_qp_info - SCIF endpoint queue pair
+ *
+ * @qp: Queue pair for this endpoint
+ * @qp_offset: DMA address of the QP
+ * @gnt_pld: Payload in a SCIF_CNCT_GNT message containing the
+ *           physical address of the remote_qp
+ */
+struct scif_endpt_qp_info {
+ struct scif_qp *qp;
+ dma_addr_t qp_offset;
+ dma_addr_t gnt_pld;
+};
+
+/*
+ * struct scif_endpt - The SCIF endpoint data structure
+ *
+ * @state: end point state
+ * @lock: lock synchronizing access to endpoint fields like state etc
+ * @port: self port information
+ * @peer: peer port information
+ * @backlog: maximum pending connection requests
+ * @qp_info: Endpoint QP information for SCIF messaging
+ * @remote_dev: scifdev used by this endpt to communicate with remote node.
+ * @remote_ep: remote endpoint
+ * @conreqcnt: Keep track of number of connection requests.
+ * @files: Open file information used to match the id passed in with
+ * the flush routine.
+ * @conlist: list of connection requests
+ * @conwq: waitqueue for connection processing
+ * @discon: completion used during disconnection
+ * @sendwq: waitqueue used during sending messages
+ * @recvwq: waitqueue used during message receipt
+ * @sendlock: Synchronize ordering of messages sent
+ * @recvlock: Synchronize ordering of messages received
+ * @list: link to list of various endpoints like connected, listening etc
+ * @li_accept: list of endpoints with a pending ACCEPTREG
+ * @acceptcnt: number of pending ACCEPTREGs
+ * @liacceptlist: link into the listening endpoint's li_accept list
+ * @miacceptlist: link into the global uaccept list
+ * @listenep: associated listening endpoint
+ * @conn_work: Non blocking connect work
+ * @conn_port: Connection port
+ * @conn_err: Errors during connection
+ * @conn_async_state: Async connection
+ * @conn_list: List of async connection requests
+ */
+struct scif_endpt {
+ enum scif_epd_state state;
+ spinlock_t lock;
+ struct scif_port_id port;
+ struct scif_port_id peer;
+ int backlog;
+ struct scif_endpt_qp_info qp_info;
+ struct scif_dev *remote_dev;
+ u64 remote_ep;
+ int conreqcnt;
+ struct files_struct *files;
+ struct list_head conlist;
+ wait_queue_head_t conwq;
+ struct completion discon;
+ wait_queue_head_t sendwq;
+ wait_queue_head_t recvwq;
+ struct mutex sendlock;
+ struct mutex recvlock;
+ struct list_head list;
+ struct list_head li_accept;
+ int acceptcnt;
+ struct list_head liacceptlist;
+ struct list_head miacceptlist;
+ struct scif_endpt *listenep;
+ struct scif_port_id conn_port;
+ int conn_err;
+ int conn_async_state;
+ struct list_head conn_list;
+};
+
+static inline int scifdev_alive(struct scif_endpt *ep)
+{
+ return _scifdev_alive(ep->remote_dev);
+}
+
+void scif_cleanup_zombie_epd(void);
+void scif_teardown_ep(void *endpt);
+void scif_cleanup_ep_qp(struct scif_endpt *ep);
+void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held);
+void scif_get_node_info(void);
+void scif_send_acks(struct scif_dev *dev);
+void scif_conn_handler(struct work_struct *work);
+int scif_rsrv_port(u16 port);
+void scif_get_port(u16 port);
+int scif_get_new_port(void);
+void scif_put_port(u16 port);
+int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags);
+int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags);
+void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
+int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
+int __scif_flush(scif_epd_t epd);
+#endif /* SCIF_EPD_H */
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
new file mode 100644
index 000000000000..eccf7e7135f9
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -0,0 +1,303 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "scif_main.h"
+
+static int scif_fdopen(struct inode *inode, struct file *f)
+{
+ struct scif_endpt *priv = scif_open();
+
+ if (!priv)
+ return -ENOMEM;
+ f->private_data = priv;
+ return 0;
+}
+
+static int scif_fdclose(struct inode *inode, struct file *f)
+{
+ struct scif_endpt *priv = f->private_data;
+
+ return scif_close(priv);
+}
+
+static int scif_fdflush(struct file *f, fl_owner_t id)
+{
+ struct scif_endpt *ep = f->private_data;
+
+ spin_lock(&ep->lock);
+	/*
+	 * The listening endpoint stashes its open file information before
+	 * waiting for incoming connections. Because the file descriptor
+	 * reference count is bumped up in the accept ioctl, the release
+	 * callback would never be called if the application closed the
+	 * endpoint while a separate thread was still waiting for incoming
+	 * connections. Call the flush routine when the id matches the
+	 * stashed open file information so that the listening endpoint can
+	 * be woken up and the fd released.
+	 */
+ if (ep->files == id)
+ __scif_flush(ep);
+ spin_unlock(&ep->lock);
+ return 0;
+}
+
+static __always_inline void scif_err_debug(int err, const char *str)
+{
+ /*
+	 * ENOTCONN is a common, uninteresting error which would otherwise
+	 * flood the console with debug messages.
+ */
+ if (err < 0 && err != -ENOTCONN)
+ dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
+}
+
+static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct scif_endpt *priv = f->private_data;
+ void __user *argp = (void __user *)arg;
+ int err = 0;
+ struct scifioctl_msg request;
+ bool non_block = false;
+
+ non_block = !!(f->f_flags & O_NONBLOCK);
+
+ switch (cmd) {
+ case SCIF_BIND:
+ {
+ int pn;
+
+ if (copy_from_user(&pn, argp, sizeof(pn)))
+ return -EFAULT;
+
+ pn = scif_bind(priv, pn);
+ if (pn < 0)
+ return pn;
+
+ if (copy_to_user(argp, &pn, sizeof(pn)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case SCIF_LISTEN:
+ return scif_listen(priv, arg);
+ case SCIF_CONNECT:
+ {
+ struct scifioctl_connect req;
+ struct scif_endpt *ep = (struct scif_endpt *)priv;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ err = __scif_connect(priv, &req.peer, non_block);
+ if (err < 0)
+ return err;
+
+ req.self.node = ep->port.node;
+ req.self.port = ep->port.port;
+
+ if (copy_to_user(argp, &req, sizeof(req)))
+ return -EFAULT;
+
+ return 0;
+ }
+	/*
+	 * Accept is done in two halves. The request ioctl does the basic
+	 * functionality of accepting the request and returning the
+	 * information about it, including the internal ID of the endpoint.
+	 * The register ioctl is then issued with that internal ID on a new
+	 * file descriptor opened by the requesting process. (A user-space
+	 * sketch of this two-step flow follows scif_fops below.)
+	 */
+ case SCIF_ACCEPTREQ:
+ {
+ struct scifioctl_accept request;
+ scif_epd_t *ep = (scif_epd_t *)&request.endpt;
+
+ if (copy_from_user(&request, argp, sizeof(request)))
+ return -EFAULT;
+
+ err = scif_accept(priv, &request.peer, ep, request.flags);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(argp, &request, sizeof(request))) {
+ scif_close(*ep);
+ return -EFAULT;
+ }
+ /*
+ * Add to the list of user mode eps where the second half
+ * of the accept is not yet completed.
+ */
+ spin_lock(&scif_info.eplock);
+ list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
+ list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
+ (*ep)->listenep = priv;
+ priv->acceptcnt++;
+ spin_unlock(&scif_info.eplock);
+
+ return 0;
+ }
+ case SCIF_ACCEPTREG:
+ {
+ struct scif_endpt *priv = f->private_data;
+ struct scif_endpt *newep;
+ struct scif_endpt *lisep;
+ struct scif_endpt *fep = NULL;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+
+		/* Read back the internal endpoint ID returned by ACCEPTREQ */
+ if (copy_from_user(&newep, argp, sizeof(void *)))
+ return -EFAULT;
+
+		/* Remove from the user accept queue */
+ spin_lock(&scif_info.eplock);
+ list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, miacceptlist);
+ if (tmpep == newep) {
+ list_del(pos);
+ fep = tmpep;
+ break;
+ }
+ }
+
+ if (!fep) {
+ spin_unlock(&scif_info.eplock);
+ return -ENOENT;
+ }
+
+ lisep = newep->listenep;
+ list_for_each_safe(pos, tmpq, &lisep->li_accept) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, liacceptlist);
+ if (tmpep == newep) {
+ list_del(pos);
+ lisep->acceptcnt--;
+ break;
+ }
+ }
+
+ spin_unlock(&scif_info.eplock);
+
+ /* Free the resources automatically created from the open. */
+ scif_teardown_ep(priv);
+ scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
+ f->private_data = newep;
+ return 0;
+ }
+ case SCIF_SEND:
+ {
+ struct scif_endpt *priv = f->private_data;
+
+ if (copy_from_user(&request, argp,
+ sizeof(struct scifioctl_msg))) {
+ err = -EFAULT;
+ goto send_err;
+ }
+ err = scif_user_send(priv, (void __user *)request.msg,
+ request.len, request.flags);
+ if (err < 0)
+ goto send_err;
+		if (copy_to_user(&((struct scifioctl_msg __user *)argp)->out_len,
+				 &err, sizeof(err))) {
+ err = -EFAULT;
+ goto send_err;
+ }
+ err = 0;
+send_err:
+ scif_err_debug(err, "scif_send");
+ return err;
+ }
+ case SCIF_RECV:
+ {
+ struct scif_endpt *priv = f->private_data;
+
+ if (copy_from_user(&request, argp,
+ sizeof(struct scifioctl_msg))) {
+ err = -EFAULT;
+ goto recv_err;
+ }
+
+ err = scif_user_recv(priv, (void __user *)request.msg,
+ request.len, request.flags);
+ if (err < 0)
+ goto recv_err;
+
+		if (copy_to_user(&((struct scifioctl_msg __user *)argp)->out_len,
+				 &err, sizeof(err))) {
+ err = -EFAULT;
+ goto recv_err;
+ }
+ err = 0;
+recv_err:
+ scif_err_debug(err, "scif_recv");
+ return err;
+ }
+ case SCIF_GET_NODEIDS:
+ {
+ struct scifioctl_node_ids node_ids;
+ int entries;
+ u16 *nodes;
+ void __user *unodes, *uself;
+ u16 self;
+
+ if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
+ err = -EFAULT;
+ goto getnodes_err2;
+ }
+
+ entries = min_t(int, scif_info.maxid, node_ids.len);
+ nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
+ if (entries && !nodes) {
+ err = -ENOMEM;
+ goto getnodes_err2;
+ }
+ node_ids.len = scif_get_node_ids(nodes, entries, &self);
+
+ unodes = (void __user *)node_ids.nodes;
+ if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
+ err = -EFAULT;
+ goto getnodes_err1;
+ }
+
+ uself = (void __user *)node_ids.self;
+ if (copy_to_user(uself, &self, sizeof(u16))) {
+ err = -EFAULT;
+ goto getnodes_err1;
+ }
+
+ if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
+ err = -EFAULT;
+ goto getnodes_err1;
+ }
+getnodes_err1:
+ kfree(nodes);
+getnodes_err2:
+ return err;
+ }
+ }
+ return -EINVAL;
+}
+
+const struct file_operations scif_fops = {
+ .open = scif_fdopen,
+ .release = scif_fdclose,
+ .unlocked_ioctl = scif_fdioctl,
+ .flush = scif_fdflush,
+ .owner = THIS_MODULE,
+};
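Not part of the patch: a minimal user-space sketch of driving this ioctl
interface, covering both the connect side and the two-step accept described
above. It assumes the uapi definitions added elsewhere in this series
(<linux/scif.h>, <linux/scif_ioctl.h>, the SCIF_* ioctl numbers and the
SCIF_SEND_BLOCK/SCIF_ACCEPT_SYNC flags); structure field names are taken from
the handlers above, the peer node/port values are made up, and error handling
is minimal.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/scif.h>
	#include <linux/scif_ioctl.h>

	/* Connecting side: bind, connect to an assumed peer, send a message */
	static int client(void)
	{
		struct scifioctl_connect conn;
		struct scifioctl_msg msg;
		char buf[] = "hello";
		int fd, port = 0;			/* 0: pick any free port */

		fd = open("/dev/scif", O_RDWR);		/* one endpoint per open() */
		if (fd < 0)
			return -1;
		memset(&conn, 0, sizeof(conn));
		conn.peer.node = 1;			/* assumed peer location */
		conn.peer.port = 2000;
		if (ioctl(fd, SCIF_BIND, &port) < 0 ||	/* returns the bound port */
		    ioctl(fd, SCIF_CONNECT, &conn) < 0)	/* blocks unless O_NONBLOCK */
			goto err;
		memset(&msg, 0, sizeof(msg));
		msg.msg = (__u64)(unsigned long)buf;
		msg.len = sizeof(buf);
		msg.flags = SCIF_SEND_BLOCK;
		if (ioctl(fd, SCIF_SEND, &msg) < 0)	/* out_len = bytes sent */
			goto err;
		printf("sent %d bytes from %d:%d\n", msg.out_len,
		       conn.self.node, conn.self.port);
		close(fd);
		return 0;
	err:
		close(fd);
		return -1;
	}

	/* Listening side: accept is two ioctls across two fds */
	static int server(void)
	{
		struct scifioctl_accept areq;
		int lfd, nfd, port = 2000;

		lfd = open("/dev/scif", O_RDWR);
		if (lfd < 0)
			return -1;
		memset(&areq, 0, sizeof(areq));
		areq.flags = SCIF_ACCEPT_SYNC;		/* block for a request */
		if (ioctl(lfd, SCIF_BIND, &port) < 0 ||
		    ioctl(lfd, SCIF_LISTEN, 16) < 0 ||	/* backlog of 16 */
		    ioctl(lfd, SCIF_ACCEPTREQ, &areq) < 0)
			return -1;
		nfd = open("/dev/scif", O_RDWR);	/* fresh fd for the connection */
		if (nfd < 0 || ioctl(nfd, SCIF_ACCEPTREG, &areq.endpt) < 0)
			return -1;
		return nfd;				/* nfd is now the accepted ep */
	}

	int main(int argc, char **argv)
	{
		return (argc > 1 && !strcmp(argv[1], "server")) ?
			(server() < 0) : (client() < 0);
	}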
diff --git a/drivers/misc/mic/scif/scif_main.c b/drivers/misc/mic/scif/scif_main.c
new file mode 100644
index 000000000000..6ce851f5c7e6
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_main.c
@@ -0,0 +1,388 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/module.h>
+#include <linux/idr.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "../bus/scif_bus.h"
+#include "scif_peer_bus.h"
+#include "scif_main.h"
+#include "scif_map.h"
+
+struct scif_info scif_info = {
+ .mdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "scif",
+ .fops = &scif_fops,
+ }
+};
+
+struct scif_dev *scif_dev;
+static atomic_t g_loopb_cnt;
+
+/* Runs in the context of intr_wq */
+static void scif_intr_bh_handler(struct work_struct *work)
+{
+ struct scif_dev *scifdev =
+ container_of(work, struct scif_dev, intr_bh);
+
+ if (scifdev_self(scifdev))
+ scif_loopb_msg_handler(scifdev, scifdev->qpairs);
+ else
+ scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
+}
+
+int scif_setup_intr_wq(struct scif_dev *scifdev)
+{
+ if (!scifdev->intr_wq) {
+ snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
+ "SCIF INTR %d", scifdev->node);
+ scifdev->intr_wq =
+ alloc_ordered_workqueue(scifdev->intr_wqname, 0);
+ if (!scifdev->intr_wq)
+ return -ENOMEM;
+ INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
+ }
+ return 0;
+}
+
+void scif_destroy_intr_wq(struct scif_dev *scifdev)
+{
+ if (scifdev->intr_wq) {
+ destroy_workqueue(scifdev->intr_wq);
+ scifdev->intr_wq = NULL;
+ }
+}
+
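+/*
+ * Hard interrupt handler: ack the doorbell and defer all message
+ * processing to the ordered workqueue, since the node QP message
+ * handlers can sleep (several of them take mutexes).
+ */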
+irqreturn_t scif_intr_handler(int irq, void *data)
+{
+ struct scif_dev *scifdev = data;
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
+ queue_work(scifdev->intr_wq, &scifdev->intr_bh);
+ return IRQ_HANDLED;
+}
+
+static int scif_peer_probe(struct scif_peer_dev *spdev)
+{
+ struct scif_dev *scifdev = &scif_dev[spdev->dnode];
+
+ mutex_lock(&scif_info.conflock);
+ scif_info.total++;
+ scif_info.maxid = max_t(u32, spdev->dnode, scif_info.maxid);
+ mutex_unlock(&scif_info.conflock);
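+	/* Publishing spdev marks this peer as alive, see _scifdev_alive() */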
+ rcu_assign_pointer(scifdev->spdev, spdev);
+
+ /* In the future SCIF kernel client devices will be added here */
+ return 0;
+}
+
+static void scif_peer_remove(struct scif_peer_dev *spdev)
+{
+ struct scif_dev *scifdev = &scif_dev[spdev->dnode];
+
+ /* In the future SCIF kernel client devices will be removed here */
+ spdev = rcu_dereference(scifdev->spdev);
+ if (spdev)
+ RCU_INIT_POINTER(scifdev->spdev, NULL);
+ synchronize_rcu();
+
+ mutex_lock(&scif_info.conflock);
+ scif_info.total--;
+ mutex_unlock(&scif_info.conflock);
+}
+
+static void scif_qp_setup_handler(struct work_struct *work)
+{
+ struct scif_dev *scifdev = container_of(work, struct scif_dev,
+ qp_dwork.work);
+ struct scif_hw_dev *sdev = scifdev->sdev;
+ dma_addr_t da = 0;
+ int err;
+
+ if (scif_is_mgmt_node()) {
+ struct mic_bootparam *bp = sdev->dp;
+
+ da = bp->scif_card_dma_addr;
+ scifdev->rdb = bp->h2c_scif_db;
+ } else {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ da = readq(&bp->scif_host_dma_addr);
+ scifdev->rdb = ioread8(&bp->c2h_scif_db);
+ }
+ if (da) {
+ err = scif_qp_response(da, scifdev);
+ if (err)
+ dev_err(&scifdev->sdev->dev,
+ "scif_qp_response err %d\n", err);
+ } else {
+ schedule_delayed_work(&scifdev->qp_dwork,
+ msecs_to_jiffies(1000));
+ }
+}
+
+static int scif_setup_scifdev(struct scif_hw_dev *sdev)
+{
+ int i;
+ u8 num_nodes;
+
+ if (sdev->snode) {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ num_nodes = ioread8(&bp->tot_nodes);
+ } else {
+ struct mic_bootparam *bp = sdev->dp;
+
+ num_nodes = bp->tot_nodes;
+ }
+ scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
+ if (!scif_dev)
+ return -ENOMEM;
+ for (i = 0; i < num_nodes; i++) {
+ struct scif_dev *scifdev = &scif_dev[i];
+
+ scifdev->node = i;
+ scifdev->exit = OP_IDLE;
+ init_waitqueue_head(&scifdev->disconn_wq);
+ mutex_init(&scifdev->lock);
+ INIT_WORK(&scifdev->init_msg_work, scif_qp_response_ack);
+ INIT_DELAYED_WORK(&scifdev->p2p_dwork,
+ scif_poll_qp_state);
+ INIT_DELAYED_WORK(&scifdev->qp_dwork,
+ scif_qp_setup_handler);
+ INIT_LIST_HEAD(&scifdev->p2p);
+ RCU_INIT_POINTER(scifdev->spdev, NULL);
+ }
+ return 0;
+}
+
+static void scif_destroy_scifdev(void)
+{
+ kfree(scif_dev);
+}
+
+static int scif_probe(struct scif_hw_dev *sdev)
+{
+ struct scif_dev *scifdev;
+ int rc;
+
+ dev_set_drvdata(&sdev->dev, sdev);
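+	/*
+	 * The first probe allocates the global scif_dev array and brings up
+	 * the loopback QP; subsequent probes only initialize their own slot.
+	 */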
+ if (1 == atomic_add_return(1, &g_loopb_cnt)) {
+ struct scif_dev *loopb_dev;
+
+ rc = scif_setup_scifdev(sdev);
+ if (rc)
+ goto exit;
+ scifdev = &scif_dev[sdev->dnode];
+ scifdev->sdev = sdev;
+ loopb_dev = &scif_dev[sdev->snode];
+ loopb_dev->sdev = sdev;
+ rc = scif_setup_loopback_qp(loopb_dev);
+ if (rc)
+ goto free_sdev;
+ } else {
+ scifdev = &scif_dev[sdev->dnode];
+ scifdev->sdev = sdev;
+ }
+ rc = scif_setup_intr_wq(scifdev);
+ if (rc)
+ goto destroy_loopb;
+ rc = scif_setup_qp(scifdev);
+ if (rc)
+ goto destroy_intr;
+ scifdev->db = sdev->hw_ops->next_db(sdev);
+ scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
+ "SCIF_INTR", scifdev,
+ scifdev->db);
+ if (IS_ERR(scifdev->cookie)) {
+ rc = PTR_ERR(scifdev->cookie);
+ goto free_qp;
+ }
+ if (scif_is_mgmt_node()) {
+ struct mic_bootparam *bp = sdev->dp;
+
+ bp->c2h_scif_db = scifdev->db;
+ bp->scif_host_dma_addr = scifdev->qp_dma_addr;
+ } else {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ iowrite8(scifdev->db, &bp->h2c_scif_db);
+ writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
+ }
+ schedule_delayed_work(&scifdev->qp_dwork,
+ msecs_to_jiffies(1000));
+ return rc;
+free_qp:
+ scif_free_qp(scifdev);
+destroy_intr:
+ scif_destroy_intr_wq(scifdev);
+destroy_loopb:
+ if (atomic_dec_and_test(&g_loopb_cnt))
+ scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
+free_sdev:
+ scif_destroy_scifdev();
+exit:
+ return rc;
+}
+
+void scif_stop(struct scif_dev *scifdev)
+{
+ struct scif_dev *dev;
+ int i;
+
+ for (i = scif_info.maxid; i >= 0; i--) {
+ dev = &scif_dev[i];
+ if (scifdev_self(dev))
+ continue;
+ scif_handle_remove_node(i);
+ }
+}
+
+static void scif_remove(struct scif_hw_dev *sdev)
+{
+ struct scif_dev *scifdev = &scif_dev[sdev->dnode];
+
+ if (scif_is_mgmt_node()) {
+ struct mic_bootparam *bp = sdev->dp;
+
+ bp->c2h_scif_db = -1;
+ bp->scif_host_dma_addr = 0x0;
+ } else {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ iowrite8(-1, &bp->h2c_scif_db);
+ writeq(0x0, &bp->scif_card_dma_addr);
+ }
+ if (scif_is_mgmt_node()) {
+ scif_disconnect_node(scifdev->node, true);
+ } else {
+ scif_info.card_initiated_exit = true;
+ scif_stop(scifdev);
+ }
+ if (atomic_dec_and_test(&g_loopb_cnt))
+ scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
+ if (scifdev->cookie) {
+ sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
+ scifdev->cookie = NULL;
+ }
+ scif_destroy_intr_wq(scifdev);
+ cancel_delayed_work(&scifdev->qp_dwork);
+ scif_free_qp(scifdev);
+ scifdev->rdb = -1;
+ scifdev->sdev = NULL;
+}
+
+static struct scif_peer_driver scif_peer_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .probe = scif_peer_probe,
+ .remove = scif_peer_remove,
+};
+
+static struct scif_hw_dev_id id_table[] = {
+ { MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct scif_driver scif_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = scif_probe,
+ .remove = scif_remove,
+};
+
+static int _scif_init(void)
+{
+ spin_lock_init(&scif_info.eplock);
+ spin_lock_init(&scif_info.nb_connect_lock);
+ spin_lock_init(&scif_info.port_lock);
+ mutex_init(&scif_info.conflock);
+ mutex_init(&scif_info.connlock);
+ INIT_LIST_HEAD(&scif_info.uaccept);
+ INIT_LIST_HEAD(&scif_info.listen);
+ INIT_LIST_HEAD(&scif_info.zombie);
+ INIT_LIST_HEAD(&scif_info.connected);
+ INIT_LIST_HEAD(&scif_info.disconnected);
+ INIT_LIST_HEAD(&scif_info.nb_connect_list);
+ init_waitqueue_head(&scif_info.exitwq);
+ scif_info.en_msg_log = 0;
+ scif_info.p2p_enable = 1;
+ INIT_WORK(&scif_info.misc_work, scif_misc_handler);
+ INIT_WORK(&scif_info.conn_work, scif_conn_handler);
+ idr_init(&scif_ports);
+ return 0;
+}
+
+static void _scif_exit(void)
+{
+ idr_destroy(&scif_ports);
+ scif_destroy_scifdev();
+}
+
+static int __init scif_init(void)
+{
+ struct miscdevice *mdev = &scif_info.mdev;
+ int rc;
+
+ _scif_init();
+ rc = scif_peer_bus_init();
+ if (rc)
+ goto exit;
+ rc = scif_peer_register_driver(&scif_peer_driver);
+ if (rc)
+ goto peer_bus_exit;
+ rc = scif_register_driver(&scif_driver);
+ if (rc)
+ goto unreg_scif_peer;
+ rc = misc_register(mdev);
+ if (rc)
+ goto unreg_scif;
+ scif_init_debugfs();
+ return 0;
+unreg_scif:
+ scif_unregister_driver(&scif_driver);
+unreg_scif_peer:
+ scif_peer_unregister_driver(&scif_peer_driver);
+peer_bus_exit:
+ scif_peer_bus_exit();
+exit:
+ _scif_exit();
+ return rc;
+}
+
+static void __exit scif_exit(void)
+{
+ scif_exit_debugfs();
+ misc_deregister(&scif_info.mdev);
+ scif_unregister_driver(&scif_driver);
+ scif_peer_unregister_driver(&scif_peer_driver);
+ scif_peer_bus_exit();
+ _scif_exit();
+}
+
+module_init(scif_init);
+module_exit(scif_exit);
+
+MODULE_DEVICE_TABLE(scif, id_table);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) SCIF driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h
new file mode 100644
index 000000000000..580bc63e1b23
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_main.h
@@ -0,0 +1,254 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_MAIN_H
+#define SCIF_MAIN_H
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/dmaengine.h>
+#include <linux/file.h>
+#include <linux/scif.h>
+
+#include "../common/mic_dev.h"
+
+#define SCIF_MGMT_NODE 0
+#define SCIF_DEFAULT_WATCHDOG_TO 30
+#define SCIF_NODE_ACCEPT_TIMEOUT (3 * HZ)
+#define SCIF_NODE_ALIVE_TIMEOUT (SCIF_DEFAULT_WATCHDOG_TO * HZ)
+
+/*
+ * Generic state used for certain node QP message exchanges
+ * like Unregister, Alloc etc.
+ */
+enum scif_msg_state {
+ OP_IDLE = 1,
+ OP_IN_PROGRESS,
+ OP_COMPLETED,
+ OP_FAILED
+};
+
+/*
+ * struct scif_info - Global SCIF information
+ *
+ * @nodeid: Node ID this node is to others
+ * @maxid: Max known node ID
+ * @total: Total number of SCIF nodes
+ * @nr_zombies: number of zombie endpoints
+ * @eplock: Lock to synchronize listening, zombie endpoint lists
+ * @connlock: Lock to synchronize connected and disconnected lists
+ * @nb_connect_lock: Synchronize non blocking connect operations
+ * @port_lock: Synchronize access to SCIF ports
+ * @uaccept: List of user acceptreq waiting for acceptreg
+ * @listen: List of listening end points
+ * @zombie: List of zombie end points with pending RMA's
+ * @connected: List of end points in connected state
+ * @disconnected: List of end points in disconnected state
+ * @nb_connect_list: List for non blocking connections
+ * @misc_work: miscellaneous SCIF tasks
+ * @conflock: Lock to synchronize SCIF node configuration changes
+ * @en_msg_log: Enable debug message logging
+ * @p2p_enable: Enable P2P SCIF network
+ * @mdev: The MISC device
+ * @conn_work: Work for workqueue handling all connections
+ * @exitwq: Wait queue for waiting for an EXIT node QP message response
+ * @loopb_dev: Dummy SCIF device used for loopback
+ * @loopb_wq: Workqueue used for handling loopback messages
+ * @loopb_wqname: Name of loopback workqueue
+ * @loopb_work: Used for submitting work to loopb_wq
+ * @loopb_recv_q: List of messages received on the loopb_wq
+ * @card_initiated_exit: set when the card has initiated the exit
+ */
+struct scif_info {
+ u8 nodeid;
+ u8 maxid;
+ u8 total;
+ u32 nr_zombies;
+ spinlock_t eplock;
+ struct mutex connlock;
+ spinlock_t nb_connect_lock;
+ spinlock_t port_lock;
+ struct list_head uaccept;
+ struct list_head listen;
+ struct list_head zombie;
+ struct list_head connected;
+ struct list_head disconnected;
+ struct list_head nb_connect_list;
+ struct work_struct misc_work;
+ struct mutex conflock;
+ u8 en_msg_log;
+ u8 p2p_enable;
+ struct miscdevice mdev;
+ struct work_struct conn_work;
+ wait_queue_head_t exitwq;
+ struct scif_dev *loopb_dev;
+ struct workqueue_struct *loopb_wq;
+ char loopb_wqname[16];
+ struct work_struct loopb_work;
+ struct list_head loopb_recv_q;
+ bool card_initiated_exit;
+};
+
+/*
+ * struct scif_p2p_info - SCIF mapping information used for P2P
+ *
+ * @ppi_peer_id: SCIF peer node id
+ * @ppi_sg: Scatter lists for the BARs (one for MMIO and one for the aperture)
+ * @sg_nentries: Number of entries in each scatterlist
+ * @ppi_da: DMA address for MMIO and APER bars
+ * @ppi_len: Length of MMIO and APER bars
+ * @ppi_list: Link in list of mapping information
+ */
+struct scif_p2p_info {
+ u8 ppi_peer_id;
+ struct scatterlist *ppi_sg[2];
+ u64 sg_nentries[2];
+ dma_addr_t ppi_da[2];
+ u64 ppi_len[2];
+#define SCIF_PPI_MMIO 0
+#define SCIF_PPI_APER 1
+ struct list_head ppi_list;
+};
+
+/*
+ * struct scif_dev - SCIF remote device specific fields
+ *
+ * @node: Node id
+ * @p2p: List of P2P mapping information
+ * @qpairs: The node queue pair for exchanging control messages
+ * @intr_wq: Workqueue for handling Node QP messages
+ * @intr_wqname: Name of node QP workqueue for handling interrupts
+ * @intr_bh: Used for submitting work to intr_wq
+ * @lock: Lock used for synchronizing access to the scif device
+ * @sdev: SCIF hardware device on the SCIF hardware bus
+ * @db: doorbell the peer will trigger to generate an interrupt on self
+ * @rdb: Doorbell to trigger on the peer to generate an interrupt on the peer
+ * @cookie: Cookie received while registering the interrupt handler
+ * @init_msg_work: Work scheduled for SCIF_INIT message processing
+ * @p2p_dwork: Delayed work to enable polling for P2P state
+ * @qp_dwork: Delayed work for enabling polling for remote QP information
+ * @p2p_retry: Number of times to retry polling of P2P state
+ * @base_addr: P2P aperture bar base address
+ * @mmio: The peer MMIO information used for P2P
+ * @spdev: SCIF peer device on the SCIF peer bus
+ * @node_remove_ack_pending: True if a node_remove_ack is pending
+ * @exit_ack_pending: true if an exit_ack is pending
+ * @disconn_wq: Used while waiting for a node remove response
+ * @disconn_rescnt: Keeps track of number of node remove requests sent
+ * @exit: Status of exit message
+ * @qp_dma_addr: Queue pair DMA address passed to the peer
+ */
+struct scif_dev {
+ u8 node;
+ struct list_head p2p;
+ struct scif_qp *qpairs;
+ struct workqueue_struct *intr_wq;
+ char intr_wqname[16];
+ struct work_struct intr_bh;
+ struct mutex lock;
+ struct scif_hw_dev *sdev;
+ int db;
+ int rdb;
+ struct mic_irq *cookie;
+ struct work_struct init_msg_work;
+ struct delayed_work p2p_dwork;
+ struct delayed_work qp_dwork;
+ int p2p_retry;
+ dma_addr_t base_addr;
+ struct mic_mw mmio;
+ struct scif_peer_dev __rcu *spdev;
+ bool node_remove_ack_pending;
+ bool exit_ack_pending;
+ wait_queue_head_t disconn_wq;
+ atomic_t disconn_rescnt;
+ enum scif_msg_state exit;
+ dma_addr_t qp_dma_addr;
+};
+
+extern struct scif_info scif_info;
+extern struct idr scif_ports;
+extern struct scif_dev *scif_dev;
+extern const struct file_operations scif_fops;
+
+/* Size of the RB for the Node QP */
+#define SCIF_NODE_QP_SIZE 0x10000
+
+#include "scif_nodeqp.h"
+
+/*
+ * scifdev_self:
+ * @dev: The remote SCIF Device
+ *
+ * Returns true if the SCIF device passed in is the self, i.e. loopback,
+ * SCIF device.
+ */
+static inline int scifdev_self(struct scif_dev *dev)
+{
+ return dev->node == scif_info.nodeid;
+}
+
+static inline bool scif_is_mgmt_node(void)
+{
+ return !scif_info.nodeid;
+}
+
+/*
+ * scifdev_is_p2p:
+ * @dev: The remote SCIF Device
+ *
+ * Returns true if the SCIF Device is a MIC Peer to Peer SCIF device.
+ */
+static inline bool scifdev_is_p2p(struct scif_dev *dev)
+{
+ if (scif_is_mgmt_node())
+ return false;
+ else
+ return dev != &scif_dev[SCIF_MGMT_NODE] &&
+ !scifdev_self(dev);
+}
+
+/*
+ * _scifdev_alive:
+ * @scifdev: The remote SCIF Device
+ *
+ * Returns true if the remote SCIF Device is running or sleeping, i.e.
+ * a peer device is still registered for it.
+ */
+static inline int _scifdev_alive(struct scif_dev *scifdev)
+{
+ struct scif_peer_dev *spdev;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ rcu_read_unlock();
+ return !!spdev;
+}
+
+#include "scif_epd.h"
+
+void __init scif_init_debugfs(void);
+void scif_exit_debugfs(void);
+int scif_setup_intr_wq(struct scif_dev *scifdev);
+void scif_destroy_intr_wq(struct scif_dev *scifdev);
+void scif_cleanup_scifdev(struct scif_dev *dev);
+void scif_handle_remove_node(int node);
+void scif_disconnect_node(u32 node_id, bool mgmt_initiated);
+void scif_free_qp(struct scif_dev *dev);
+void scif_misc_handler(struct work_struct *work);
+void scif_stop(struct scif_dev *scifdev);
+irqreturn_t scif_intr_handler(int irq, void *data);
+#endif /* SCIF_MAIN_H */
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
new file mode 100644
index 000000000000..20e50b4e19b2
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_map.h
@@ -0,0 +1,113 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_MAP_H
+#define SCIF_MAP_H
+
+#include "../bus/scif_bus.h"
+
+static __always_inline void *
+scif_alloc_coherent(dma_addr_t *dma_handle,
+ struct scif_dev *scifdev, size_t size,
+ gfp_t gfp)
+{
+ void *va;
+
+ if (scifdev_self(scifdev)) {
+ va = kmalloc(size, gfp);
+ if (va)
+ *dma_handle = virt_to_phys(va);
+ } else {
+ va = dma_alloc_coherent(&scifdev->sdev->dev,
+ size, dma_handle, gfp);
+ if (va && scifdev_is_p2p(scifdev))
+ *dma_handle = *dma_handle + scifdev->base_addr;
+ }
+ return va;
+}
+
+static __always_inline void
+scif_free_coherent(void *va, dma_addr_t local,
+ struct scif_dev *scifdev, size_t size)
+{
+ if (scifdev_self(scifdev)) {
+ kfree(va);
+ } else {
+ if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
+ local = local - scifdev->base_addr;
+ dma_free_coherent(&scifdev->sdev->dev,
+ size, va, local);
+ }
+}
+
+static __always_inline int
+scif_map_single(dma_addr_t *dma_handle,
+ void *local, struct scif_dev *scifdev, size_t size)
+{
+ int err = 0;
+
+ if (scifdev_self(scifdev)) {
+ *dma_handle = virt_to_phys((local));
+ } else {
+ *dma_handle = dma_map_single(&scifdev->sdev->dev,
+ local, size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&scifdev->sdev->dev, *dma_handle))
+ err = -ENOMEM;
+ else if (scifdev_is_p2p(scifdev))
+ *dma_handle = *dma_handle + scifdev->base_addr;
+ }
+ if (err)
+ *dma_handle = 0;
+ return err;
+}
+
+static __always_inline void
+scif_unmap_single(dma_addr_t local, struct scif_dev *scifdev,
+ size_t size)
+{
+ if (!scifdev_self(scifdev)) {
+ if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
+ local = local - scifdev->base_addr;
+ dma_unmap_single(&scifdev->sdev->dev, local,
+ size, DMA_BIDIRECTIONAL);
+ }
+}
+
+static __always_inline void *
+scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
+{
+ void *out_virt;
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ if (scifdev_self(scifdev))
+ out_virt = phys_to_virt(phys);
+ else
+ out_virt = (void __force *)
+ sdev->hw_ops->ioremap(sdev, phys, size);
+ return out_virt;
+}
+
+static __always_inline void
+scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
+{
+ if (!scifdev_self(scifdev)) {
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt);
+ }
+}
+#endif /* SCIF_MAP_H */
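The helpers above hide three address domains behind one interface: loopback
endpoints use plain kmalloc()/virt_to_phys(), the host-card path uses the DMA
API, and P2P peers additionally offset the handle by the peer's aperture base.
Illustrative only (not part of the patch), a caller pairs them roughly like
this:

	/* Sketch: allocate a buffer visible to both ends of 'scifdev' */
	static int example_alloc_shared(struct scif_dev *scifdev, size_t size)
	{
		dma_addr_t da;
		void *va = scif_alloc_coherent(&da, scifdev, size, GFP_KERNEL);

		if (!va)
			return -ENOMEM;
		/* ... publish 'da' to the peer and use 'va' locally ... */
		scif_free_coherent(va, da, scifdev, size);
		return 0;
	}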
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
new file mode 100644
index 000000000000..9b4c5382d6a7
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nm.c
@@ -0,0 +1,237 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "scif_peer_bus.h"
+
+#include "scif_main.h"
+#include "scif_map.h"
+
+/**
+ * scif_invalidate_ep() - Set state for all endpoints connected to @node
+ * to disconnected and wake up all send/recv waitqueues
+ * @node: Node whose endpoints should be invalidated
+ */
+static void scif_invalidate_ep(int node)
+{
+ struct scif_endpt *ep;
+ struct list_head *pos, *tmpq;
+
+ flush_work(&scif_info.conn_work);
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ if (ep->remote_dev->node == node) {
+ spin_lock(&ep->lock);
+ scif_cleanup_ep_qp(ep);
+ spin_unlock(&ep->lock);
+ }
+ }
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ if (ep->remote_dev->node == node) {
+ list_del(pos);
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_DISCONNECTED;
+ list_add_tail(&ep->list, &scif_info.disconnected);
+ scif_cleanup_ep_qp(ep);
+ wake_up_interruptible(&ep->sendwq);
+ wake_up_interruptible(&ep->recvwq);
+ spin_unlock(&ep->lock);
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+}
+
+void scif_free_qp(struct scif_dev *scifdev)
+{
+ struct scif_qp *qp = scifdev->qpairs;
+
+ if (!qp)
+ return;
+ scif_free_coherent((void *)qp->inbound_q.rb_base,
+ qp->local_buf, scifdev, qp->inbound_q.size);
+ scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
+ kfree(scifdev->qpairs);
+ scifdev->qpairs = NULL;
+}
+
+static void scif_cleanup_qp(struct scif_dev *dev)
+{
+ struct scif_qp *qp = &dev->qpairs[0];
+
+ if (!qp)
+ return;
+ scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev);
+ scif_iounmap((void *)qp->outbound_q.rb_base,
+ sizeof(struct scif_qp), dev);
+ qp->remote_qp = NULL;
+ qp->local_write = 0;
+ qp->inbound_q.current_write_offset = 0;
+ qp->inbound_q.current_read_offset = 0;
+ if (scifdev_is_p2p(dev))
+ scif_free_qp(dev);
+}
+
+void scif_send_acks(struct scif_dev *dev)
+{
+ struct scifmsg msg;
+
+ if (dev->node_remove_ack_pending) {
+ msg.uop = SCIF_NODE_REMOVE_ACK;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = SCIF_MGMT_NODE;
+ msg.payload[0] = dev->node;
+ scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
+ dev->node_remove_ack_pending = false;
+ }
+ if (dev->exit_ack_pending) {
+ msg.uop = SCIF_EXIT_ACK;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = dev->node;
+ scif_nodeqp_send(dev, &msg);
+ dev->exit_ack_pending = false;
+ }
+}
+
+/*
+ * scif_cleanup_scifdev - Uninitialize SCIF data structures for a remote
+ * SCIF device
+ * @dev: Remote SCIF device
+ */
+void scif_cleanup_scifdev(struct scif_dev *dev)
+{
+ struct scif_hw_dev *sdev = dev->sdev;
+
+ if (!dev->sdev)
+ return;
+ if (scifdev_is_p2p(dev)) {
+ if (dev->cookie) {
+ sdev->hw_ops->free_irq(sdev, dev->cookie, dev);
+ dev->cookie = NULL;
+ }
+ scif_destroy_intr_wq(dev);
+ }
+ scif_destroy_p2p(dev);
+ scif_invalidate_ep(dev->node);
+ scif_send_acks(dev);
+ if (!dev->node && scif_info.card_initiated_exit) {
+ /*
+ * Send an SCIF_EXIT message which is the last message from MIC
+ * to the Host and wait for a SCIF_EXIT_ACK
+ */
+ scif_send_exit(dev);
+ scif_info.card_initiated_exit = false;
+ }
+ scif_cleanup_qp(dev);
+}
+
+/*
+ * scif_handle_remove_node:
+ * @node: Node to remove
+ *
+ * Unregister the peer device for @node or, if it is already gone, send
+ * any pending acks on its behalf.
+ */
+void scif_handle_remove_node(int node)
+{
+ struct scif_dev *scifdev = &scif_dev[node];
+ struct scif_peer_dev *spdev;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ rcu_read_unlock();
+ if (spdev)
+ scif_peer_unregister_device(spdev);
+ else
+ scif_send_acks(scifdev);
+}
+
+static int scif_send_rmnode_msg(int node, int remove_node)
+{
+ struct scifmsg notif_msg;
+ struct scif_dev *dev = &scif_dev[node];
+
+ notif_msg.uop = SCIF_NODE_REMOVE;
+ notif_msg.src.node = scif_info.nodeid;
+ notif_msg.dst.node = node;
+ notif_msg.payload[0] = remove_node;
+ return scif_nodeqp_send(dev, &notif_msg);
+}
+
+/**
+ * scif_disconnect_node:
+ * @node_id: Id of the node to disconnect
+ * @mgmt_initiated: Disconnection initiated from the mgmt node
+ *
+ * Disconnect a node from the SCIF network.
+ */
+void scif_disconnect_node(u32 node_id, bool mgmt_initiated)
+{
+ int ret;
+ int msg_cnt = 0;
+ u32 i = 0;
+ struct scif_dev *scifdev = &scif_dev[node_id];
+
+ if (!node_id)
+ return;
+
+ atomic_set(&scifdev->disconn_rescnt, 0);
+
+ /* Destroy p2p network */
+ for (i = 1; i <= scif_info.maxid; i++) {
+ if (i == node_id)
+ continue;
+ ret = scif_send_rmnode_msg(i, node_id);
+ if (!ret)
+ msg_cnt++;
+ }
+ /* Wait for the remote nodes to respond with SCIF_NODE_REMOVE_ACK */
+ ret = wait_event_timeout(scifdev->disconn_wq,
+ (atomic_read(&scifdev->disconn_rescnt)
+ == msg_cnt), SCIF_NODE_ALIVE_TIMEOUT);
+ /* Tell the card to clean up */
+ if (mgmt_initiated && _scifdev_alive(scifdev))
+ /*
+ * Send an SCIF_EXIT message which is the last message from Host
+ * to the MIC and wait for a SCIF_EXIT_ACK
+ */
+ scif_send_exit(scifdev);
+ atomic_set(&scifdev->disconn_rescnt, 0);
+ /* Tell the mgmt node to clean up */
+ ret = scif_send_rmnode_msg(SCIF_MGMT_NODE, node_id);
+ if (!ret)
+ /* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */
+ wait_event_timeout(scifdev->disconn_wq,
+ (atomic_read(&scifdev->disconn_rescnt) == 1),
+ SCIF_NODE_ALIVE_TIMEOUT);
+}
+
+void scif_get_node_info(void)
+{
+ struct scifmsg msg;
+ DECLARE_COMPLETION_ONSTACK(node_info);
+
+ msg.uop = SCIF_GET_NODE_INFO;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = SCIF_MGMT_NODE;
+ msg.payload[3] = (u64)&node_info;
+
+	if (scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg))
+ return;
+
+ /* Wait for a response with SCIF_GET_NODE_INFO */
+ wait_for_completion(&node_info);
+}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
new file mode 100644
index 000000000000..41e3bdb10061
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -0,0 +1,1312 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "../bus/scif_bus.h"
+#include "scif_peer_bus.h"
+#include "scif_main.h"
+#include "scif_nodeqp.h"
+#include "scif_map.h"
+
+/*
+ ************************************************************************
+ * SCIF node Queue Pair (QP) setup flow:
+ *
+ * 1) SCIF driver gets probed with a scif_hw_dev via the scif_hw_bus
+ * 2) scif_setup_qp(..) allocates the local qp and calls
+ * scif_setup_qp_connect(..) which allocates and maps the local
+ * buffer for the inbound QP
+ * 3) The local node updates the device page with the DMA address of the QP
+ * 4) A delayed work is scheduled (qp_dwork) which periodically reads if
+ * the peer node has updated its QP DMA address
+ * 5) Once a valid non zero address is found in the QP DMA address field
+ * in the device page, the local node maps the remote node's QP,
+ * updates its outbound QP and sends a SCIF_INIT message to the peer
+ * 6) The SCIF_INIT message is received by the peer node QP interrupt bottom
+ * half handler by calling scif_init(..)
+ * 7) scif_init(..) registers a new SCIF peer node by calling
+ * scif_peer_register_device(..) which signifies the addition of a new
+ * SCIF node
+ * 8) On the mgmt node, P2P network setup/teardown is initiated if all the
+ * remote nodes are online via scif_p2p_setup(..)
+ * 9) For P2P setup, the host maps the remote nodes' aperture and memory
+ * bars and sends a SCIF_NODE_ADD message to both nodes
+ * 10) As part of scif_nodeadd, both nodes set up their local inbound
+ * QPs and send a SCIF_NODE_ADD_ACK to the mgmt node
+ * 11) As part of scif_node_add_ack(..) the mgmt node forwards the
+ * SCIF_NODE_ADD_ACK to the remote nodes
+ * 12) As part of scif_node_add_ack(..) the remote nodes update their
+ * outbound QPs, make sure they can access memory on the remote node
+ * and then add a new SCIF peer node by calling
+ * scif_peer_register_device(..) which signifies the addition of a new
+ * SCIF node.
+ * 13) The SCIF network is now established across all nodes.
+ *
+ ************************************************************************
+ * SCIF node QP teardown flow (initiated by non mgmt node):
+ *
+ * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
+ * 2) The device page QP DMA address field is updated with 0x0
+ * 3) A non mgmt node now cleans up all local data structures and sends a
+ * SCIF_EXIT message to the peer and waits for a SCIF_EXIT_ACK
+ * 4) As part of scif_exit(..) handling scif_disconnect_node(..) is called
+ * 5) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the
+ * peers and waits for a SCIF_NODE_REMOVE_ACK
+ * 6) As part of scif_node_remove(..) a remote node unregisters the peer
+ * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
+ * 7) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
+ * it sends itself a node remove message whose handling cleans up local
+ * data structures and unregisters the peer node from the SCIF network
+ * 8) The mgmt node sends a SCIF_EXIT_ACK
+ * 9) Upon receipt of the SCIF_EXIT_ACK the node initiating the teardown
+ * completes the SCIF remove routine
+ * 10) The SCIF network is now torn down for the node initiating the
+ * teardown sequence
+ *
+ ************************************************************************
+ * SCIF node QP teardown flow (initiated by mgmt node):
+ *
+ * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
+ * 2) The device page QP DMA address field is updated with 0x0
+ * 3) The mgmt node calls scif_disconnect_node(..)
+ * 4) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the peers
+ * and waits for a SCIF_NODE_REMOVE_ACK
+ * 5) As part of scif_node_remove(..) a remote node unregisters the peer
+ * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
+ * 6) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
+ * it unregisters the peer node from the SCIF network
+ * 7) The mgmt node sends a SCIF_EXIT message and waits for a SCIF_EXIT_ACK.
+ * 8) A non mgmt node upon receipt of a SCIF_EXIT message calls scif_stop(..)
+ * which would clean up local data structures for all SCIF nodes and
+ * then send a SCIF_EXIT_ACK back to the mgmt node
+ * 9) Upon receipt of the SCIF_EXIT_ACK the mgmt node sends itself a node
+ * remove message whose handling cleans up local data structures and
+ * destroys any P2P mappings.
+ * 10) The SCIF hardware device for which a remove callback was received is now
+ * disconnected from the SCIF network.
+ */
+/*
+ * Initializes "local" data structures for the QP. Allocates the QP
+ * ring buffer (rb) and initializes the "inbound" queue.
+ */
+int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
+ int local_size, struct scif_dev *scifdev)
+{
+ void *local_q = NULL;
+ int err = 0;
+ u32 tmp_rd = 0;
+
+ spin_lock_init(&qp->send_lock);
+ spin_lock_init(&qp->recv_lock);
+
+ local_q = kzalloc(local_size, GFP_KERNEL);
+ if (!local_q) {
+ err = -ENOMEM;
+ return err;
+ }
+ err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
+ if (err)
+ goto kfree;
+ /*
+ * To setup the inbound_q, the buffer lives locally, the read pointer
+ * is remote and the write pointer is local.
+ */
+ scif_rb_init(&qp->inbound_q,
+ &tmp_rd,
+ &qp->local_write,
+ local_q, get_count_order(local_size));
+ /*
+ * The read pointer is NULL initially and it is unsafe to use the ring
+	 * buffer until this changes!
+ */
+ qp->inbound_q.read_ptr = NULL;
+ err = scif_map_single(qp_offset, qp,
+ scifdev, sizeof(struct scif_qp));
+ if (err)
+ goto unmap;
+ qp->local_qp = *qp_offset;
+ return err;
+unmap:
+ scif_unmap_single(qp->local_buf, scifdev, local_size);
+ qp->local_buf = 0;
+kfree:
+ kfree(local_q);
+ return err;
+}
+
+/* Called when the other side has already done its allocation */
+int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
+ dma_addr_t phys, int local_size,
+ struct scif_dev *scifdev)
+{
+ void *local_q;
+ void *remote_q;
+ struct scif_qp *remote_qp;
+ int remote_size;
+ int err = 0;
+
+ spin_lock_init(&qp->send_lock);
+ spin_lock_init(&qp->recv_lock);
+ /* Start by figuring out where we need to point */
+ remote_qp = scif_ioremap(phys, sizeof(struct scif_qp), scifdev);
+ if (!remote_qp)
+ return -EIO;
+ qp->remote_qp = remote_qp;
+ if (qp->remote_qp->magic != SCIFEP_MAGIC) {
+ err = -EIO;
+ goto iounmap;
+ }
+ qp->remote_buf = remote_qp->local_buf;
+ remote_size = qp->remote_qp->inbound_q.size;
+ remote_q = scif_ioremap(qp->remote_buf, remote_size, scifdev);
+ if (!remote_q) {
+ err = -EIO;
+ goto iounmap;
+ }
+ qp->remote_qp->local_write = 0;
+ /*
+ * To setup the outbound_q, the buffer lives in remote memory,
+ * the read pointer is local, the write pointer is remote
+ */
+ scif_rb_init(&qp->outbound_q,
+ &qp->local_read,
+ &qp->remote_qp->local_write,
+ remote_q,
+ get_count_order(remote_size));
+ local_q = kzalloc(local_size, GFP_KERNEL);
+ if (!local_q) {
+ err = -ENOMEM;
+ goto iounmap_1;
+ }
+ err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
+ if (err)
+ goto kfree;
+ qp->remote_qp->local_read = 0;
+ /*
+ * To setup the inbound_q, the buffer lives locally, the read pointer
+ * is remote and the write pointer is local
+ */
+ scif_rb_init(&qp->inbound_q,
+ &qp->remote_qp->local_read,
+ &qp->local_write,
+ local_q, get_count_order(local_size));
+ err = scif_map_single(qp_offset, qp, scifdev,
+ sizeof(struct scif_qp));
+ if (err)
+ goto unmap;
+ qp->local_qp = *qp_offset;
+ return err;
+unmap:
+ scif_unmap_single(qp->local_buf, scifdev, local_size);
+ qp->local_buf = 0;
+kfree:
+ kfree(local_q);
+iounmap_1:
+ scif_iounmap(remote_q, remote_size, scifdev);
+ qp->outbound_q.rb_base = NULL;
+iounmap:
+ scif_iounmap(qp->remote_qp, sizeof(struct scif_qp), scifdev);
+ qp->remote_qp = NULL;
+ return err;
+}
+
+int scif_setup_qp_connect_response(struct scif_dev *scifdev,
+ struct scif_qp *qp, u64 payload)
+{
+ int err = 0;
+ void *r_buf;
+ int remote_size;
+ phys_addr_t tmp_phys;
+
+ qp->remote_qp = scif_ioremap(payload, sizeof(struct scif_qp), scifdev);
+
+ if (!qp->remote_qp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ if (qp->remote_qp->magic != SCIFEP_MAGIC) {
+ dev_err(&scifdev->sdev->dev,
+ "SCIFEP_MAGIC mismatch between self %d remote %d\n",
+ scif_dev[scif_info.nodeid].node, scifdev->node);
+ err = -ENODEV;
+ goto error;
+ }
+
+ tmp_phys = qp->remote_qp->local_buf;
+ remote_size = qp->remote_qp->inbound_q.size;
+ r_buf = scif_ioremap(tmp_phys, remote_size, scifdev);
+
+ if (!r_buf)
+ return -EIO;
+
+ qp->local_read = 0;
+ scif_rb_init(&qp->outbound_q,
+ &qp->local_read,
+ &qp->remote_qp->local_write,
+ r_buf,
+ get_count_order(remote_size));
+ /*
+	 * Set up the inbound_q again now that we know where its read
+	 * pointer (inbound_read) really is.
+ */
+ scif_rb_init(&qp->inbound_q,
+ &qp->remote_qp->local_read,
+ &qp->local_write,
+ qp->inbound_q.rb_base,
+ get_count_order(qp->inbound_q.size));
+error:
+ return err;
+}
+
+static __always_inline void
+scif_send_msg_intr(struct scif_dev *scifdev)
+{
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ if (scifdev_is_p2p(scifdev))
+ sdev->hw_ops->send_p2p_intr(sdev, scifdev->rdb, &scifdev->mmio);
+ else
+ sdev->hw_ops->send_intr(sdev, scifdev->rdb);
+}
+
+int scif_qp_response(phys_addr_t phys, struct scif_dev *scifdev)
+{
+ int err = 0;
+ struct scifmsg msg;
+
+ err = scif_setup_qp_connect_response(scifdev, scifdev->qpairs, phys);
+ if (!err) {
+ /*
+ * Now that everything is setup and mapped, we're ready
+ * to tell the peer about our queue's location
+ */
+ msg.uop = SCIF_INIT;
+ msg.dst.node = scifdev->node;
+ err = scif_nodeqp_send(scifdev, &msg);
+ }
+ return err;
+}
+
+void scif_send_exit(struct scif_dev *scifdev)
+{
+ struct scifmsg msg;
+ int ret;
+
+ scifdev->exit = OP_IN_PROGRESS;
+ msg.uop = SCIF_EXIT;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = scifdev->node;
+ ret = scif_nodeqp_send(scifdev, &msg);
+ if (ret)
+ goto done;
+ /* Wait for a SCIF_EXIT_ACK message */
+ wait_event_timeout(scif_info.exitwq, scifdev->exit == OP_COMPLETED,
+ SCIF_NODE_ALIVE_TIMEOUT);
+done:
+ scifdev->exit = OP_IDLE;
+}
+
+int scif_setup_qp(struct scif_dev *scifdev)
+{
+ int err = 0;
+ int local_size;
+ struct scif_qp *qp;
+
+ local_size = SCIF_NODE_QP_SIZE;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ err = -ENOMEM;
+ return err;
+ }
+ qp->magic = SCIFEP_MAGIC;
+ scifdev->qpairs = qp;
+ err = scif_setup_qp_connect(qp, &scifdev->qp_dma_addr,
+ local_size, scifdev);
+ if (err)
+ goto free_qp;
+ /*
+	 * We're as set up as we can be: the inbound_q is ready, but there
+	 * is no usable outbound_q yet. When a message arrives, the read_ptr
+	 * will be updated and we will pull the message.
+ */
+ return err;
+free_qp:
+ kfree(scifdev->qpairs);
+ scifdev->qpairs = NULL;
+ return err;
+}
+
+static void scif_p2p_freesg(struct scatterlist *sg)
+{
+ kfree(sg);
+}
+
+static struct scatterlist *
+scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ int i;
+
+ sg = kcalloc(page_cnt, sizeof(struct scatterlist), GFP_KERNEL);
+ if (!sg)
+ return NULL;
+ sg_init_table(sg, page_cnt);
+ for (i = 0; i < page_cnt; i++) {
+ page = vmalloc_to_page((void __force *)va);
+ if (!page)
+ goto p2p_sg_err;
+ sg_set_page(&sg[i], page, page_size, 0);
+ va += page_size;
+ }
+ return sg;
+p2p_sg_err:
+ kfree(sg);
+ return NULL;
+}
+
+/* Init p2p mappings required to access peerdev from scifdev */
+static struct scif_p2p_info *
+scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
+{
+ struct scif_p2p_info *p2p;
+ int num_mmio_pages, num_aper_pages, sg_page_shift, err, num_aper_chunks;
+ struct scif_hw_dev *psdev = peerdev->sdev;
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ num_mmio_pages = psdev->mmio->len >> PAGE_SHIFT;
+ num_aper_pages = psdev->aper->len >> PAGE_SHIFT;
+
+ p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
+ if (!p2p)
+ return NULL;
+ p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
+ PAGE_SIZE, num_mmio_pages);
+ if (!p2p->ppi_sg[SCIF_PPI_MMIO])
+ goto free_p2p;
+ p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
+ sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
+ num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
+ p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
+ 1 << sg_page_shift,
+ num_aper_chunks);
+ p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
+ err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
+ num_mmio_pages, PCI_DMA_BIDIRECTIONAL);
+ if (err != num_mmio_pages)
+ goto scif_p2p_free;
+ err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
+ num_aper_chunks, PCI_DMA_BIDIRECTIONAL);
+ if (err != num_aper_chunks)
+ goto dma_unmap;
+ p2p->ppi_da[SCIF_PPI_MMIO] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ p2p->ppi_da[SCIF_PPI_APER] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_APER]);
+ p2p->ppi_len[SCIF_PPI_MMIO] = num_mmio_pages;
+ p2p->ppi_len[SCIF_PPI_APER] = num_aper_pages;
+ p2p->ppi_peer_id = peerdev->node;
+ return p2p;
+dma_unmap:
+ dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
+ p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
+scif_p2p_free:
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
+free_p2p:
+ kfree(p2p);
+ return NULL;
+}
+
+/**
+ * scif_node_connect() - Respond to SCIF_NODE_CONNECT interrupt message
+ * @scifdev: SCIF device of the source node
+ * @dst: Destination node
+ *
+ * Connect the src and dst nodes by setting up the p2p connection
+ * between them. The management node acts as a proxy here.
+ */
+static void scif_node_connect(struct scif_dev *scifdev, int dst)
+{
+ struct scif_dev *dev_j = scifdev;
+ struct scif_dev *dev_i = NULL;
+ struct scif_p2p_info *p2p_ij = NULL; /* bus addr for j from i */
+ struct scif_p2p_info *p2p_ji = NULL; /* bus addr for i from j */
+ struct scif_p2p_info *p2p;
+ struct list_head *pos, *tmp;
+ struct scifmsg msg;
+ int err;
+ u64 tmppayload;
+
+ if (dst < 1 || dst > scif_info.maxid)
+ return;
+
+ dev_i = &scif_dev[dst];
+
+ if (!_scifdev_alive(dev_i))
+ return;
+ /*
+ * If the p2p connection is already set up, or is in the process of
+ * being set up, just ignore this request. The requested node will be
+ * informed by SCIF_NODE_ADD_ACK or SCIF_NODE_ADD_NACK.
+ */
+ if (!list_empty(&dev_i->p2p)) {
+ list_for_each_safe(pos, tmp, &dev_i->p2p) {
+ p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
+ if (p2p->ppi_peer_id == dev_j->node)
+ return;
+ }
+ }
+ p2p_ij = scif_init_p2p_info(dev_i, dev_j);
+ if (!p2p_ij)
+ return;
+ p2p_ji = scif_init_p2p_info(dev_j, dev_i);
+ if (!p2p_ji) {
+ /* Unwind the i->j mapping created above instead of leaking it */
+ dma_unmap_sg(&dev_i->sdev->dev, p2p_ij->ppi_sg[SCIF_PPI_MMIO],
+ p2p_ij->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev_i->sdev->dev, p2p_ij->ppi_sg[SCIF_PPI_APER],
+ p2p_ij->sg_nentries[SCIF_PPI_APER], DMA_BIDIRECTIONAL);
+ scif_p2p_freesg(p2p_ij->ppi_sg[SCIF_PPI_MMIO]);
+ scif_p2p_freesg(p2p_ij->ppi_sg[SCIF_PPI_APER]);
+ kfree(p2p_ij);
+ return;
+ }
+ list_add_tail(&p2p_ij->ppi_list, &dev_i->p2p);
+ list_add_tail(&p2p_ji->ppi_list, &dev_j->p2p);
+
+ /*
+ * Send a SCIF_NODE_ADD to dev_i, pass it its bus address
+ * as seen from dev_j
+ */
+ msg.uop = SCIF_NODE_ADD;
+ msg.src.node = dev_j->node;
+ msg.dst.node = dev_i->node;
+
+ msg.payload[0] = p2p_ji->ppi_da[SCIF_PPI_APER];
+ msg.payload[1] = p2p_ij->ppi_da[SCIF_PPI_MMIO];
+ msg.payload[2] = p2p_ij->ppi_da[SCIF_PPI_APER];
+ msg.payload[3] = p2p_ij->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
+
+ err = scif_nodeqp_send(dev_i, &msg);
+ if (err) {
+ dev_err(&scifdev->sdev->dev,
+ "%s %d error %d\n", __func__, __LINE__, err);
+ return;
+ }
+
+ /* Same as above but to dev_j */
+ msg.uop = SCIF_NODE_ADD;
+ msg.src.node = dev_i->node;
+ msg.dst.node = dev_j->node;
+
+ tmppayload = msg.payload[0];
+ msg.payload[0] = msg.payload[2];
+ msg.payload[2] = tmppayload;
+ msg.payload[1] = p2p_ji->ppi_da[SCIF_PPI_MMIO];
+ msg.payload[3] = p2p_ji->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
+
+ scif_nodeqp_send(dev_j, &msg);
+}
+
+static void scif_p2p_setup(void)
+{
+ int i, j;
+
+ if (!scif_info.p2p_enable)
+ return;
+
+ for (i = 1; i <= scif_info.maxid; i++)
+ if (!_scifdev_alive(&scif_dev[i]))
+ return;
+
+ for (i = 1; i <= scif_info.maxid; i++) {
+ for (j = 1; j <= scif_info.maxid; j++) {
+ struct scif_dev *scifdev = &scif_dev[i];
+
+ if (i == j)
+ continue;
+ scif_node_connect(scifdev, j);
+ }
+ }
+}
+
+void scif_qp_response_ack(struct work_struct *work)
+{
+ struct scif_dev *scifdev = container_of(work, struct scif_dev,
+ init_msg_work);
+ struct scif_peer_dev *spdev;
+
+ /* Drop the INIT message if it has already been received */
+ if (_scifdev_alive(scifdev))
+ return;
+
+ spdev = scif_peer_register_device(scifdev);
+ if (IS_ERR(spdev))
+ return;
+
+ if (scif_is_mgmt_node()) {
+ mutex_lock(&scif_info.conflock);
+ scif_p2p_setup();
+ mutex_unlock(&scif_info.conflock);
+ }
+}
+
+static char *message_types[] = {"BAD",
+ "INIT",
+ "EXIT",
+ "SCIF_EXIT_ACK",
+ "SCIF_NODE_ADD",
+ "SCIF_NODE_ADD_ACK",
+ "SCIF_NODE_ADD_NACK",
+ "REMOVE_NODE",
+ "REMOVE_NODE_ACK",
+ "CNCT_REQ",
+ "CNCT_GNT",
+ "CNCT_GNTACK",
+ "CNCT_GNTNACK",
+ "CNCT_REJ",
+ "DISCNCT",
+ "DISCNT_ACK",
+ "CLIENT_SENT",
+ "CLIENT_RCVD",
+ "SCIF_GET_NODE_INFO"};
+
+static void
+scif_display_message(struct scif_dev *scifdev, struct scifmsg *msg,
+ const char *label)
+{
+ if (!scif_info.en_msg_log)
+ return;
+ if (msg->uop > SCIF_MAX_MSG) {
+ dev_err(&scifdev->sdev->dev,
+ "%s: unknown msg type %d\n", label, msg->uop);
+ return;
+ }
+ dev_info(&scifdev->sdev->dev,
+ "%s: msg type %s, src %d:%d, dest %d:%d payload 0x%llx:0x%llx:0x%llx:0x%llx\n",
+ label, message_types[msg->uop], msg->src.node, msg->src.port,
+ msg->dst.node, msg->dst.port, msg->payload[0], msg->payload[1],
+ msg->payload[2], msg->payload[3]);
+}
+
+int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_qp *qp = scifdev->qpairs;
+ int err = -ENOMEM, loop_cnt = 0;
+
+ scif_display_message(scifdev, msg, "Sent");
+ if (!qp) {
+ err = -EINVAL;
+ goto error;
+ }
+ spin_lock(&qp->send_lock);
+
+ while ((err = scif_rb_write(&qp->outbound_q,
+ msg, sizeof(struct scifmsg)))) {
+ mdelay(1);
+#define SCIF_NODEQP_SEND_TO_MSEC (3 * 1000)
+ if (loop_cnt++ > (SCIF_NODEQP_SEND_TO_MSEC)) {
+ err = -ENODEV;
+ break;
+ }
+ }
+ if (!err)
+ scif_rb_commit(&qp->outbound_q);
+ spin_unlock(&qp->send_lock);
+ if (!err) {
+ if (scifdev_self(scifdev))
+ /*
+ * For loopback we need to emulate an interrupt by
+ * queuing work on the queue that handles real node
+ * QP interrupts.
+ */
+ queue_work(scifdev->intr_wq, &scifdev->intr_bh);
+ else
+ scif_send_msg_intr(scifdev);
+ }
+error:
+ if (err)
+ dev_dbg(&scifdev->sdev->dev,
+ "%s %d error %d uop %d\n",
+ __func__, __LINE__, err, msg->uop);
+ return err;
+}
+
+/**
+ * scif_nodeqp_send - Send a message on the node queue pair
+ * @scifdev: Scif Device.
+ * @msg: The message to be sent.
+ */
+int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ int err;
+ struct device *spdev = NULL;
+
+ if (msg->uop > SCIF_EXIT_ACK) {
+ /* Don't send messages once the exit flow has begun */
+ if (OP_IDLE != scifdev->exit)
+ return -ENODEV;
+ spdev = scif_get_peer_dev(scifdev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ return err;
+ }
+ }
+ err = _scif_nodeqp_send(scifdev, msg);
+ if (msg->uop > SCIF_EXIT_ACK)
+ scif_put_peer_dev(spdev);
+ return err;
+}
+
+/*
+ * scif_misc_handler:
+ *
+ * Work queue handler for servicing miscellaneous SCIF tasks.
+ * Examples include:
+ * 1) Cleanup of zombie endpoints.
+ */
+void scif_misc_handler(struct work_struct *work)
+{
+ scif_cleanup_zombie_epd();
+}
+
+/**
+ * scif_init() - Respond to SCIF_INIT interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ */
+static __always_inline void
+scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ /*
+ * Allow the thread waiting for device page updates for the peer QP DMA
+ * address to complete initializing the inbound_q.
+ */
+ flush_delayed_work(&scifdev->qp_dwork);
+ /*
+ * Delegate the peer device registration to a workqueue, otherwise if
+ * SCIF client probe (called during peer device registration) calls
+ * scif_connect(..), it will block the message processing thread causing
+ * a deadlock.
+ */
+ schedule_work(&scifdev->init_msg_work);
+}
+
+/**
+ * scif_exit() - Respond to SCIF_EXIT interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * This function stops the SCIF interface for the node which sent
+ * the SCIF_EXIT message and starts waiting for that node to
+ * set up the queue pair again.
+ */
+static __always_inline void
+scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
+{
+ scifdev->exit_ack_pending = true;
+ if (scif_is_mgmt_node())
+ scif_disconnect_node(scifdev->node, false);
+ else
+ scif_stop(scifdev);
+ schedule_delayed_work(&scifdev->qp_dwork,
+ msecs_to_jiffies(1000));
+}
+
+/**
+ * scif_exit_ack() - Respond to SCIF_EXIT_ACK interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ */
+static __always_inline void
+scif_exit_ack(struct scif_dev *scifdev, struct scifmsg *unused)
+{
+ scifdev->exit = OP_COMPLETED;
+ wake_up(&scif_info.exitwq);
+}
+
+/**
+ * scif_node_add() - Respond to SCIF_NODE_ADD interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * When the mgmt node driver has finished initializing a MIC node queue pair it
+ * marks the node as online. It then looks for all currently online MIC cards
+ * and sends a SCIF_NODE_ADD message to identify the ID of the new card for
+ * peer-to-peer initialization.
+ *
+ * The local node allocates its incoming queue and sends its address in the
+ * SCIF_NODE_ADD_ACK message back to the mgmt node; the mgmt node "reflects"
+ * this message to the new node.
+ */
+static __always_inline void
+scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_dev *newdev;
+ dma_addr_t qp_offset;
+ int qp_connect;
+ struct scif_hw_dev *sdev;
+
+ dev_dbg(&scifdev->sdev->dev,
+ "Scifdev %d:%d received NODE_ADD msg for node %d\n",
+ scifdev->node, msg->dst.node, msg->src.node);
+ dev_dbg(&scifdev->sdev->dev,
+ "Remote address for this node's aperture %llx\n",
+ msg->payload[0]);
+ newdev = &scif_dev[msg->src.node];
+ newdev->node = msg->src.node;
+ newdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
+ sdev = newdev->sdev;
+
+ if (scif_setup_intr_wq(newdev)) {
+ dev_err(&scifdev->sdev->dev,
+ "failed to setup interrupts for %d\n", msg->src.node);
+ goto interrupt_setup_error;
+ }
+ newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len);
+ if (!newdev->mmio.va) {
+ dev_err(&scifdev->sdev->dev,
+ "failed to map mmio for %d\n", msg->src.node);
+ goto mmio_map_error;
+ }
+ newdev->qpairs = kzalloc(sizeof(*newdev->qpairs), GFP_KERNEL);
+ if (!newdev->qpairs)
+ goto qp_alloc_error;
+ /*
+ * Set the base address of the remote node's memory since it gets
+ * added to qp_offset
+ */
+ newdev->base_addr = msg->payload[0];
+
+ qp_connect = scif_setup_qp_connect(newdev->qpairs, &qp_offset,
+ SCIF_NODE_QP_SIZE, newdev);
+ if (qp_connect) {
+ dev_err(&scifdev->sdev->dev,
+ "failed to setup qp_connect %d\n", qp_connect);
+ goto qp_connect_error;
+ }
+
+ newdev->db = sdev->hw_ops->next_db(sdev);
+ newdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
+ "SCIF_INTR", newdev,
+ newdev->db);
+ if (IS_ERR(newdev->cookie))
+ goto qp_connect_error;
+ newdev->qpairs->magic = SCIFEP_MAGIC;
+ newdev->qpairs->qp_state = SCIF_QP_OFFLINE;
+
+ msg->uop = SCIF_NODE_ADD_ACK;
+ msg->dst.node = msg->src.node;
+ msg->src.node = scif_info.nodeid;
+ msg->payload[0] = qp_offset;
+ msg->payload[2] = newdev->db;
+ scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
+ return;
+qp_connect_error:
+ kfree(newdev->qpairs);
+ newdev->qpairs = NULL;
+qp_alloc_error:
+ iounmap(newdev->mmio.va);
+ newdev->mmio.va = NULL;
+mmio_map_error:
+interrupt_setup_error:
+ dev_err(&scifdev->sdev->dev,
+ "node add failed for node %d\n", msg->src.node);
+ msg->uop = SCIF_NODE_ADD_NACK;
+ msg->dst.node = msg->src.node;
+ msg->src.node = scif_info.nodeid;
+ scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
+}
+
+void scif_poll_qp_state(struct work_struct *work)
+{
+#define SCIF_NODE_QP_RETRY 100
+#define SCIF_NODE_QP_TIMEOUT 100
+ struct scif_dev *peerdev = container_of(work, struct scif_dev,
+ p2p_dwork.work);
+ struct scif_qp *qp = &peerdev->qpairs[0];
+
+ if (qp->qp_state != SCIF_QP_ONLINE ||
+ qp->remote_qp->qp_state != SCIF_QP_ONLINE) {
+ if (peerdev->p2p_retry++ == SCIF_NODE_QP_RETRY) {
+ dev_err(&peerdev->sdev->dev,
+ "Warning: QP check timeout with state %d\n",
+ qp->qp_state);
+ goto timeout;
+ }
+ schedule_delayed_work(&peerdev->p2p_dwork,
+ msecs_to_jiffies(SCIF_NODE_QP_TIMEOUT));
+ return;
+ }
+ scif_peer_register_device(peerdev);
+ return;
+timeout:
+ dev_err(&peerdev->sdev->dev,
+ "%s %d remote node %d offline, state = 0x%x\n",
+ __func__, __LINE__, peerdev->node, qp->qp_state);
+ qp->remote_qp->qp_state = SCIF_QP_OFFLINE;
+ scif_cleanup_scifdev(peerdev);
+}
+
+/**
+ * scif_node_add_ack() - Respond to SCIF_NODE_ADD_ACK interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * After a MIC node receives the SCIF_NODE_ADD_ACK message it sends this
+ * message to the mgmt node to confirm the sequence is finished.
+ *
+ */
+static __always_inline void
+scif_node_add_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_dev *peerdev;
+ struct scif_qp *qp;
+ struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
+
+ dev_dbg(&scifdev->sdev->dev,
+ "Scifdev %d received SCIF_NODE_ADD_ACK msg src %d dst %d\n",
+ scifdev->node, msg->src.node, msg->dst.node);
+ dev_dbg(&scifdev->sdev->dev,
+ "payload %llx %llx %llx %llx\n", msg->payload[0],
+ msg->payload[1], msg->payload[2], msg->payload[3]);
+ if (scif_is_mgmt_node()) {
+ /*
+ * The lock serializes with scif_qp_response_ack. The mgmt node
+ * is forwarding the NODE_ADD_ACK message from src to dst; we
+ * need to make sure that dst has already received a NODE_ADD
+ * for src and has set up its end of the qp.
+ */
+ mutex_lock(&scif_info.conflock);
+ msg->payload[1] = scif_info.maxid;
+ scif_nodeqp_send(dst_dev, msg);
+ mutex_unlock(&scif_info.conflock);
+ return;
+ }
+ peerdev = &scif_dev[msg->src.node];
+ peerdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
+ peerdev->node = msg->src.node;
+
+ qp = &peerdev->qpairs[0];
+
+ if ((scif_setup_qp_connect_response(peerdev, &peerdev->qpairs[0],
+ msg->payload[0])))
+ goto local_error;
+ peerdev->rdb = msg->payload[2];
+ qp->remote_qp->qp_state = SCIF_QP_ONLINE;
+ schedule_delayed_work(&peerdev->p2p_dwork, 0);
+ return;
+local_error:
+ scif_cleanup_scifdev(peerdev);
+}
+
+/**
+ * scif_node_add_nack() - Respond to SCIF_NODE_ADD_NACK interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * SCIF_NODE_ADD failed, so inform the waiting wq.
+ */
+static __always_inline void
+scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ if (scif_is_mgmt_node()) {
+ struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
+
+ dev_dbg(&scifdev->sdev->dev,
+ "SCIF_NODE_ADD_NACK received from %d\n", scifdev->node);
+ scif_nodeqp_send(dst_dev, msg);
+ }
+}
+
+/*
+ * scif_node_remove() - Handle SCIF_NODE_REMOVE message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * Handle node removal.
+ */
+static __always_inline void
+scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ int node = msg->payload[0];
+ struct scif_dev *scdev = &scif_dev[node];
+
+ scdev->node_remove_ack_pending = true;
+ scif_handle_remove_node(node);
+}
+
+/*
+ * scif_node_remove_ack() - Handle SCIF_NODE_REMOVE_ACK message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * The peer has acked a SCIF_NODE_REMOVE message.
+ */
+static __always_inline void
+scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_dev *sdev = &scif_dev[msg->payload[0]];
+
+ atomic_inc(&sdev->disconn_rescnt);
+ wake_up(&sdev->disconn_wq);
+}
+
+/**
+ * scif_get_node_info_resp() - Respond to SCIF_GET_NODE_INFO interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * Retrieve node info, i.e. maxid and total, from the mgmt node.
+ */
+static __always_inline void
+scif_get_node_info_resp(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ if (scif_is_mgmt_node()) {
+ swap(msg->dst.node, msg->src.node);
+ mutex_lock(&scif_info.conflock);
+ msg->payload[1] = scif_info.maxid;
+ msg->payload[2] = scif_info.total;
+ mutex_unlock(&scif_info.conflock);
+ scif_nodeqp_send(scifdev, msg);
+ } else {
+ struct completion *node_info =
+ (struct completion *)msg->payload[3];
+
+ mutex_lock(&scif_info.conflock);
+ scif_info.maxid = msg->payload[1];
+ scif_info.total = msg->payload[2];
+ complete_all(node_info);
+ mutex_unlock(&scif_info.conflock);
+ }
+}
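+
+/*
+ * Illustrative requester-side sketch (hypothetical function, compiled
+ * out via #if 0): a non-mgmt node fills in a SCIF_GET_NODE_INFO
+ * message, passes a completion via payload[3] and waits for the
+ * response handler above to call complete_all().
+ */
+#if 0
+static int scif_get_node_info_example(struct scif_dev *mgmt_dev)
+{
+ DECLARE_COMPLETION_ONSTACK(node_info);
+ struct scifmsg msg;
+ int err;
+
+ msg.uop = SCIF_GET_NODE_INFO;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = SCIF_MGMT_NODE;
+ /* the response handler reflects this pointer back in payload[3] */
+ msg.payload[3] = (u64)(unsigned long)&node_info;
+
+ err = scif_nodeqp_send(mgmt_dev, &msg);
+ if (err)
+ return err;
+ wait_for_completion(&node_info);
+ /* scif_info.maxid and scif_info.total are now up to date */
+ return 0;
+}
+#endif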
+
+static void
+scif_msg_unknown(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ /* Bogus Node Qp Message? */
+ dev_err(&scifdev->sdev->dev,
+ "Unknown message 0x%xn scifdev->node 0x%x\n",
+ msg->uop, scifdev->node);
+}
+
+static void (*scif_intr_func[SCIF_MAX_MSG + 1])
+ (struct scif_dev *, struct scifmsg *msg) = {
+ scif_msg_unknown, /* Error */
+ scif_init, /* SCIF_INIT */
+ scif_exit, /* SCIF_EXIT */
+ scif_exit_ack, /* SCIF_EXIT_ACK */
+ scif_node_add, /* SCIF_NODE_ADD */
+ scif_node_add_ack, /* SCIF_NODE_ADD_ACK */
+ scif_node_add_nack, /* SCIF_NODE_ADD_NACK */
+ scif_node_remove, /* SCIF_NODE_REMOVE */
+ scif_node_remove_ack, /* SCIF_NODE_REMOVE_ACK */
+ scif_cnctreq, /* SCIF_CNCT_REQ */
+ scif_cnctgnt, /* SCIF_CNCT_GNT */
+ scif_cnctgnt_ack, /* SCIF_CNCT_GNTACK */
+ scif_cnctgnt_nack, /* SCIF_CNCT_GNTNACK */
+ scif_cnctrej, /* SCIF_CNCT_REJ */
+ scif_discnct, /* SCIF_DISCNCT */
+ scif_discnt_ack, /* SCIF_DISCNT_ACK */
+ scif_clientsend, /* SCIF_CLIENT_SENT */
+ scif_clientrcvd, /* SCIF_CLIENT_RCVD */
+ scif_get_node_info_resp,/* SCIF_GET_NODE_INFO */
+};
+
+static int scif_max_msg_id = SCIF_MAX_MSG;
+
+/**
+ * scif_nodeqp_msg_handler() - Common handler for node messages
+ * @scifdev: Remote device to respond to
+ * @qp: Remote memory pointer
+ * @msg: The message to be handled.
+ *
+ * This routine calls the appropriate routine to handle a Node Qp
+ * message receipt.
+ */
+static void
+scif_nodeqp_msg_handler(struct scif_dev *scifdev,
+ struct scif_qp *qp, struct scifmsg *msg)
+{
+ scif_display_message(scifdev, msg, "Rcvd");
+
+ if (msg->uop > (u32)scif_max_msg_id) {
+ /* Bogus Node Qp Message? */
+ dev_err(&scifdev->sdev->dev,
+ "Unknown message 0x%xn scifdev->node 0x%x\n",
+ msg->uop, scifdev->node);
+ return;
+ }
+
+ scif_intr_func[msg->uop](scifdev, msg);
+}
+
+/**
+ * scif_nodeqp_intrhandler() - Interrupt handler for node messages
+ * @scifdev: Remote device to respond to
+ * @qp: Remote memory pointer
+ *
+ * This routine is triggered by the interrupt mechanism. It reads
+ * messages from the node queue RB and calls the Node QP Message handling
+ * routine.
+ */
+void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
+{
+ struct scifmsg msg;
+ int read_size;
+
+ do {
+ read_size = scif_rb_get_next(&qp->inbound_q, &msg, sizeof(msg));
+ if (!read_size)
+ break;
+ scif_nodeqp_msg_handler(scifdev, qp, &msg);
+ /*
+ * The node queue pair is unmapped so skip the read pointer
+ * update after receipt of a SCIF_EXIT_ACK
+ */
+ if (SCIF_EXIT_ACK == msg.uop)
+ break;
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ } while (1);
+}
+
+/**
+ * scif_loopb_wq_handler - Loopback Workqueue Handler.
+ * @work: loop back work
+ *
+ * This work queue routine is invoked from the loopback workqueue.
+ * It grabs the recv lock, dequeues any available messages from the head
+ * of the loopback message list, calls the node QP message handler,
+ * waits for it to return, then frees up this message and dequeues more
+ * elements of the list if available.
+ */
+static void scif_loopb_wq_handler(struct work_struct *unused)
+{
+ struct scif_dev *scifdev = scif_info.loopb_dev;
+ struct scif_qp *qp = scifdev->qpairs;
+ struct scif_loopb_msg *msg;
+
+ do {
+ msg = NULL;
+ spin_lock(&qp->recv_lock);
+ if (!list_empty(&scif_info.loopb_recv_q)) {
+ msg = list_first_entry(&scif_info.loopb_recv_q,
+ struct scif_loopb_msg,
+ list);
+ list_del(&msg->list);
+ }
+ spin_unlock(&qp->recv_lock);
+
+ if (msg) {
+ scif_nodeqp_msg_handler(scifdev, qp, &msg->msg);
+ kfree(msg);
+ }
+ } while (msg);
+}
+
+/**
+ * scif_loopb_msg_handler() - Workqueue handler for loopback messages.
+ * @scifdev: SCIF device
+ * @qp: Queue pair.
+ *
+ * This work queue routine is triggered when a loopback message is received.
+ *
+ * We need special handling for receiving Node Qp messages on a loopback SCIF
+ * device: two workqueues are used instead of one.
+ *
+ * The reason we need the extra workqueue which is not required with *normal*
+ * non-loopback SCIF devices is the potential classic deadlock described below:
+ *
+ * Thread A tries to send a message on a loopback SCIF device and blocks, since
+ * there is no space in the RB, while holding the send_lock or some other
+ * lock, call it lock X.
+ *
+ * Thread B: The Loopback Node QP message receive workqueue receives the message
+ * and tries to send a message (e.g. an ACK) to the loopback SCIF device. It tries
+ * to grab the send lock again or lock X and deadlocks with Thread A. The RB
+ * cannot be drained any further due to this classic deadlock.
+ *
+ * In order to avoid deadlocks as mentioned above we have an extra level of
+ * indirection achieved by having two workqueues.
+ * 1) The first workqueue whose handler is scif_loopb_msg_handler reads
+ * messages from the Node QP RB, adds them to a list and queues work for the
+ * second workqueue.
+ *
+ * 2) The second workqueue whose handler is scif_loopb_wq_handler dequeues
+ * messages from the list, handles them, frees up the memory and dequeues
+ * more elements from the list if possible.
+ */
+int
+scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp)
+{
+ int read_size;
+ struct scif_loopb_msg *msg;
+
+ do {
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ read_size = scif_rb_get_next(&qp->inbound_q, &msg->msg,
+ sizeof(struct scifmsg));
+ if (read_size != sizeof(struct scifmsg)) {
+ kfree(msg);
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ break;
+ }
+ spin_lock(&qp->recv_lock);
+ list_add_tail(&msg->list, &scif_info.loopb_recv_q);
+ spin_unlock(&qp->recv_lock);
+ queue_work(scif_info.loopb_wq, &scif_info.loopb_work);
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ } while (read_size == sizeof(struct scifmsg));
+ return read_size;
+}
+
+/**
+ * scif_setup_loopback_qp - One time setup work for Loopback Node Qp.
+ * @scifdev: SCIF device
+ *
+ * Sets up the required loopback workqueues, queue pairs and ring buffers
+ */
+int scif_setup_loopback_qp(struct scif_dev *scifdev)
+{
+ int err = 0;
+ void *local_q;
+ struct scif_qp *qp;
+ struct scif_peer_dev *spdev;
+
+ err = scif_setup_intr_wq(scifdev);
+ if (err)
+ goto exit;
+ INIT_LIST_HEAD(&scif_info.loopb_recv_q);
+ snprintf(scif_info.loopb_wqname, sizeof(scif_info.loopb_wqname),
+ "SCIF LOOPB %d", scifdev->node);
+ scif_info.loopb_wq =
+ alloc_ordered_workqueue(scif_info.loopb_wqname, 0);
+ if (!scif_info.loopb_wq) {
+ err = -ENOMEM;
+ goto destroy_intr;
+ }
+ INIT_WORK(&scif_info.loopb_work, scif_loopb_wq_handler);
+ /* Allocate Self Qpair */
+ scifdev->qpairs = kzalloc(sizeof(*scifdev->qpairs), GFP_KERNEL);
+ if (!scifdev->qpairs) {
+ err = -ENOMEM;
+ goto destroy_loopb_wq;
+ }
+
+ qp = scifdev->qpairs;
+ qp->magic = SCIFEP_MAGIC;
+ spin_lock_init(&qp->send_lock);
+ spin_lock_init(&qp->recv_lock);
+
+ local_q = kzalloc(SCIF_NODE_QP_SIZE, GFP_KERNEL);
+ if (!local_q) {
+ err = -ENOMEM;
+ goto free_qpairs;
+ }
+ /*
+ * For loopback the inbound_q and outbound_q are essentially the same
+ * since the Node sends a message on the loopback interface to the
+ * outbound_q which is then received on the inbound_q.
+ */
+ scif_rb_init(&qp->outbound_q,
+ &qp->local_read,
+ &qp->local_write,
+ local_q, get_count_order(SCIF_NODE_QP_SIZE));
+
+ scif_rb_init(&qp->inbound_q,
+ &qp->local_read,
+ &qp->local_write,
+ local_q, get_count_order(SCIF_NODE_QP_SIZE));
+ scif_info.nodeid = scifdev->node;
+ spdev = scif_peer_register_device(scifdev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto free_local_q;
+ }
+ scif_info.loopb_dev = scifdev;
+ return err;
+free_local_q:
+ kfree(local_q);
+free_qpairs:
+ kfree(scifdev->qpairs);
+destroy_loopb_wq:
+ destroy_workqueue(scif_info.loopb_wq);
+destroy_intr:
+ scif_destroy_intr_wq(scifdev);
+exit:
+ return err;
+}
+
+/**
+ * scif_destroy_loopback_qp - One time uninit work for Loopback Node Qp
+ * @scifdev: SCIF device
+ *
+ * Destroys the workqueues and frees up the Ring Buffer and Queue Pair memory.
+ */
+int scif_destroy_loopback_qp(struct scif_dev *scifdev)
+{
+ struct scif_peer_dev *spdev;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ rcu_read_unlock();
+ if (spdev)
+ scif_peer_unregister_device(spdev);
+ destroy_workqueue(scif_info.loopb_wq);
+ scif_destroy_intr_wq(scifdev);
+ kfree(scifdev->qpairs->outbound_q.rb_base);
+ kfree(scifdev->qpairs);
+ scifdev->sdev = NULL;
+ scif_info.loopb_dev = NULL;
+ return 0;
+}
+
+void scif_destroy_p2p(struct scif_dev *scifdev)
+{
+ struct scif_dev *peer_dev;
+ struct scif_p2p_info *p2p;
+ struct list_head *pos, *tmp;
+ int bd;
+
+ mutex_lock(&scif_info.conflock);
+ /* Free P2P mappings in the given node for all its peer nodes */
+ list_for_each_safe(pos, tmp, &scifdev->p2p) {
+ p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
+ dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
+ p2p->sg_nentries[SCIF_PPI_MMIO],
+ DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
+ p2p->sg_nentries[SCIF_PPI_APER],
+ DMA_BIDIRECTIONAL);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
+ list_del(pos);
+ kfree(p2p);
+ }
+
+ /* Free P2P mappings created in the peer nodes for the given node */
+ for (bd = SCIF_MGMT_NODE + 1; bd <= scif_info.maxid; bd++) {
+ peer_dev = &scif_dev[bd];
+ list_for_each_safe(pos, tmp, &peer_dev->p2p) {
+ p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
+ if (p2p->ppi_peer_id == scifdev->node) {
+ dma_unmap_sg(&peer_dev->sdev->dev,
+ p2p->ppi_sg[SCIF_PPI_MMIO],
+ p2p->sg_nentries[SCIF_PPI_MMIO],
+ DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&peer_dev->sdev->dev,
+ p2p->ppi_sg[SCIF_PPI_APER],
+ p2p->sg_nentries[SCIF_PPI_APER],
+ DMA_BIDIRECTIONAL);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
+ list_del(pos);
+ kfree(p2p);
+ }
+ }
+ }
+ mutex_unlock(&scif_info.conflock);
+}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.h b/drivers/misc/mic/scif/scif_nodeqp.h
new file mode 100644
index 000000000000..6c0ed6783479
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nodeqp.h
@@ -0,0 +1,183 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_NODEQP
+#define SCIF_NODEQP
+
+#include "scif_rb.h"
+#include "scif_peer_bus.h"
+
+#define SCIF_INIT 1 /* First message sent to the peer node for discovery */
+#define SCIF_EXIT 2 /* Last message from the peer informing intent to exit */
+#define SCIF_EXIT_ACK 3 /* Response to SCIF_EXIT message */
+#define SCIF_NODE_ADD 4 /* Tell online nodes a new node exists */
+#define SCIF_NODE_ADD_ACK 5 /* Confirm to mgmt node sequence is finished */
+#define SCIF_NODE_ADD_NACK 6 /* SCIF_NODE_ADD failed */
+#define SCIF_NODE_REMOVE 7 /* Request to deactivate a SCIF node */
+#define SCIF_NODE_REMOVE_ACK 8 /* Response to a SCIF_NODE_REMOVE message */
+#define SCIF_CNCT_REQ 9 /* Phys addr of request for connection to a port */
+#define SCIF_CNCT_GNT 10 /* Phys addr of the QP granted for a connection */
+#define SCIF_CNCT_GNTACK 11 /* Connecting node accepted the grant */
+#define SCIF_CNCT_GNTNACK 12 /* Connecting node failed to accept the grant */
+#define SCIF_CNCT_REJ 13 /* Connection request rejected */
+#define SCIF_DISCNCT 14 /* Notify peer that connection is being terminated */
+#define SCIF_DISCNT_ACK 15 /* Response to a SCIF_DISCNCT message */
+#define SCIF_CLIENT_SENT 16 /* Notify the peer that data has been written */
+#define SCIF_CLIENT_RCVD 17 /* Notify the peer that data has been read */
+#define SCIF_GET_NODE_INFO 18 /* Get current node mask from the mgmt node*/
+#define SCIF_MAX_MSG SCIF_GET_NODE_INFO
+
+/*
+ * struct scifmsg - Node QP message format
+ *
+ * @src: Source information
+ * @dst: Destination information
+ * @uop: The message opcode
+ * @payload: Unique payload format for each message
+ */
+struct scifmsg {
+ struct scif_port_id src;
+ struct scif_port_id dst;
+ u32 uop;
+ u64 payload[4];
+} __packed;
+
+/*
+ * struct scif_qp - Node Queue Pair
+ *
+ * Interesting structure -- a little difficult because we can only
+ * write across the PCIe, so any r/w pointer we need to read is
+ * local. We only need to read the write pointer on the inbound_q
+ * and the read pointer on the outbound_q.
+ *
+ * @magic: Magic value to ensure the peer sees the QP correctly
+ * @outbound_q: The outbound ring buffer for sending messages
+ * @inbound_q: The inbound ring buffer for receiving messages
+ * @local_write: Local write index
+ * @local_read: Local read index
+ * @remote_qp: The remote queue pair
+ * @local_buf: DMA address of local ring buffer
+ * @local_qp: DMA address of the local queue pair data structure
+ * @remote_buf: DMA address of remote ring buffer
+ * @qp_state: QP state i.e. online or offline used for P2P
+ * @send_lock: synchronize access to outbound queue
+ * @recv_lock: Synchronize access to inbound queue
+ */
+struct scif_qp {
+ u64 magic;
+#define SCIFEP_MAGIC 0x5c1f000000005c1fULL
+ struct scif_rb outbound_q;
+ struct scif_rb inbound_q;
+
+ u32 local_write __aligned(64);
+ u32 local_read __aligned(64);
+ struct scif_qp *remote_qp;
+ dma_addr_t local_buf;
+ dma_addr_t local_qp;
+ dma_addr_t remote_buf;
+ u32 qp_state;
+#define SCIF_QP_OFFLINE 0xdead
+#define SCIF_QP_ONLINE 0xc0de
+ spinlock_t send_lock;
+ spinlock_t recv_lock;
+};
+
+/*
+ * struct scif_loopb_msg - An element in the loopback Node QP message list.
+ *
+ * @msg - The SCIF node QP message
+ * @list - link in the list of messages
+ */
+struct scif_loopb_msg {
+ struct scifmsg msg;
+ struct list_head list;
+};
+
+int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
+int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp);
+int scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp);
+int scif_setup_qp(struct scif_dev *scifdev);
+int scif_qp_response(phys_addr_t phys, struct scif_dev *dev);
+int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
+ int local_size, struct scif_dev *scifdev);
+int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
+ dma_addr_t phys, int local_size,
+ struct scif_dev *scifdev);
+int scif_setup_qp_connect_response(struct scif_dev *scifdev,
+ struct scif_qp *qp, u64 payload);
+int scif_setup_loopback_qp(struct scif_dev *scifdev);
+int scif_destroy_loopback_qp(struct scif_dev *scifdev);
+void scif_poll_qp_state(struct work_struct *work);
+void scif_qp_response_ack(struct work_struct *work);
+void scif_destroy_p2p(struct scif_dev *scifdev);
+void scif_send_exit(struct scif_dev *scifdev);
+static inline struct device *scif_get_peer_dev(struct scif_dev *scifdev)
+{
+ struct scif_peer_dev *spdev;
+ struct device *spdev_ret;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ if (spdev)
+ spdev_ret = get_device(&spdev->dev);
+ else
+ spdev_ret = ERR_PTR(-ENODEV);
+ rcu_read_unlock();
+ return spdev_ret;
+}
+
+static inline void scif_put_peer_dev(struct device *dev)
+{
+ put_device(dev);
+}
+#endif /* SCIF_NODEQP */
diff --git a/drivers/misc/mic/scif/scif_peer_bus.c b/drivers/misc/mic/scif/scif_peer_bus.c
new file mode 100644
index 000000000000..589ae9ad2501
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_peer_bus.c
@@ -0,0 +1,124 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ */
+#include "scif_main.h"
+#include "../bus/scif_bus.h"
+#include "scif_peer_bus.h"
+
+static inline struct scif_peer_dev *
+dev_to_scif_peer(struct device *dev)
+{
+ return container_of(dev, struct scif_peer_dev, dev);
+}
+
+static inline struct scif_peer_driver *
+drv_to_scif_peer(struct device_driver *drv)
+{
+ return container_of(drv, struct scif_peer_driver, driver);
+}
+
+static int scif_peer_dev_match(struct device *dv, struct device_driver *dr)
+{
+ return !strncmp(dev_name(dv), dr->name, 4);
+}
+
+static int scif_peer_dev_probe(struct device *d)
+{
+ struct scif_peer_dev *dev = dev_to_scif_peer(d);
+ struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver);
+
+ return drv->probe(dev);
+}
+
+static int scif_peer_dev_remove(struct device *d)
+{
+ struct scif_peer_dev *dev = dev_to_scif_peer(d);
+ struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver);
+
+ drv->remove(dev);
+ return 0;
+}
+
+static struct bus_type scif_peer_bus = {
+ .name = "scif_peer_bus",
+ .match = scif_peer_dev_match,
+ .probe = scif_peer_dev_probe,
+ .remove = scif_peer_dev_remove,
+};
+
+int scif_peer_register_driver(struct scif_peer_driver *driver)
+{
+ driver->driver.bus = &scif_peer_bus;
+ return driver_register(&driver->driver);
+}
+
+void scif_peer_unregister_driver(struct scif_peer_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+
+static void scif_peer_release_dev(struct device *d)
+{
+ struct scif_peer_dev *sdev = dev_to_scif_peer(d);
+ struct scif_dev *scifdev = &scif_dev[sdev->dnode];
+
+ scif_cleanup_scifdev(scifdev);
+ kfree(sdev);
+}
+
+struct scif_peer_dev *
+scif_peer_register_device(struct scif_dev *scifdev)
+{
+ int ret;
+ struct scif_peer_dev *spdev;
+
+ spdev = kzalloc(sizeof(*spdev), GFP_KERNEL);
+ if (!spdev)
+ return ERR_PTR(-ENOMEM);
+
+ spdev->dev.parent = scifdev->sdev->dev.parent;
+ spdev->dev.release = scif_peer_release_dev;
+ spdev->dnode = scifdev->node;
+ spdev->dev.bus = &scif_peer_bus;
+
+ dev_set_name(&spdev->dev, "scif_peer-dev%u", spdev->dnode);
+ /*
+ * device_register() causes the bus infrastructure to look for a
+ * matching driver.
+ */
+ ret = device_register(&spdev->dev);
+ if (ret)
+ goto free_spdev;
+ return spdev;
+free_spdev:
+ kfree(spdev);
+ return ERR_PTR(ret);
+}
+
+void scif_peer_unregister_device(struct scif_peer_dev *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+
+int scif_peer_bus_init(void)
+{
+ return bus_register(&scif_peer_bus);
+}
+
+void scif_peer_bus_exit(void)
+{
+ bus_unregister(&scif_peer_bus);
+}
diff --git a/drivers/misc/mic/scif/scif_peer_bus.h b/drivers/misc/mic/scif/scif_peer_bus.h
new file mode 100644
index 000000000000..33f0dbb30152
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_peer_bus.h
@@ -0,0 +1,65 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ */
+#ifndef _SCIF_PEER_BUS_H_
+#define _SCIF_PEER_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mic_common.h>
+
+/*
+ * Peer devices show up as PCIe devices for the mgmt node but not the cards.
+ * The mgmt node discovers all the cards on the PCIe bus and informs the other
+ * cards about their peers. Upon notification of a peer, a node adds a peer
+ * device to the peer bus to maintain symmetry in the way devices are
+ * discovered across all nodes in the SCIF network.
+ */
+/**
+ * scif_peer_dev - representation of a peer SCIF device
+ * @dev: underlying device
+ * @dnode: The destination node which this device communicates with.
+ */
+struct scif_peer_dev {
+ struct device dev;
+ u8 dnode;
+};
+
+/**
+ * scif_peer_driver - operations for a scif_peer I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct scif_peer_driver {
+ struct device_driver driver;
+ const struct scif_peer_dev_id *id_table;
+
+ int (*probe)(struct scif_peer_dev *dev);
+ void (*remove)(struct scif_peer_dev *dev);
+};
+
+struct scif_dev;
+
+int scif_peer_register_driver(struct scif_peer_driver *driver);
+void scif_peer_unregister_driver(struct scif_peer_driver *driver);
+
+struct scif_peer_dev *scif_peer_register_device(struct scif_dev *sdev);
+void scif_peer_unregister_device(struct scif_peer_dev *sdev);
+
+int scif_peer_bus_init(void);
+void scif_peer_bus_exit(void);
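+
+/*
+ * Illustrative sketch (hypothetical names, compiled out via #if 0): a
+ * SCIF client could register on the peer bus as below and get
+ * probe()/remove() callbacks as peer nodes come and go. Note that the
+ * bus match() compares the first four characters of the device name
+ * ("scif_peer-dev%u") with the driver name, so the driver name must
+ * begin with "scif".
+ */
+#if 0
+static int example_probe(struct scif_peer_dev *spdev)
+{
+ dev_info(&spdev->dev, "peer node %u online\n", spdev->dnode);
+ return 0;
+}
+
+static void example_remove(struct scif_peer_dev *spdev)
+{
+ dev_info(&spdev->dev, "peer node %u offline\n", spdev->dnode);
+}
+
+static struct scif_peer_driver example_driver = {
+ .driver = {
+ .name = "scif_peer_example",
+ .owner = THIS_MODULE,
+ },
+ .probe = example_probe,
+ .remove = example_remove,
+};
+
+/* registered with scif_peer_register_driver(&example_driver) */
+#endif
+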
+#endif /* _SCIF_PEER_BUS_H_ */
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
new file mode 100644
index 000000000000..594e18d279d8
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_ports.c
@@ -0,0 +1,124 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/idr.h>
+
+#include "scif_main.h"
+
+#define SCIF_PORT_COUNT 0x10000 /* Ports available */
+
+struct idr scif_ports;
+
+/*
+ * struct scif_port - SCIF port information
+ *
+ * @ref_cnt: Reference count, since multiple endpoints created via
+ * scif_accept(..) can be using a port simultaneously.
+ */
+struct scif_port {
+ int ref_cnt;
+};
+
+/**
+ * __scif_get_port - Reserve a port # for SCIF in the range [start, end)
+ * and add it to the global list.
+ * @start : lowest port # to try.
+ * @end : one more than the highest port # to try.
+ *
+ * @return : Allocated SCIF port #, or -ENOSPC if no port is available.
+ * On memory allocation failure, returns -ENOMEM.
+ */
+static int __scif_get_port(int start, int end)
+{
+ int id;
+ struct scif_port *port = kzalloc(sizeof(*port), GFP_ATOMIC);
+
+ if (!port)
+ return -ENOMEM;
+ spin_lock(&scif_info.port_lock);
+ id = idr_alloc(&scif_ports, port, start, end, GFP_ATOMIC);
+ if (id >= 0)
+ port->ref_cnt++;
+ spin_unlock(&scif_info.port_lock);
+ return id;
+}
+
+/**
+ * scif_rsrv_port - Reserve a specified port # for SCIF.
+ * @port : port # to be reserved.
+ *
+ * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
+ * On memory allocation failure, returns -ENOMEM.
+ */
+int scif_rsrv_port(u16 port)
+{
+ return __scif_get_port(port, port + 1);
+}
+
+/**
+ * scif_get_new_port - Get and reserve any port # for SCIF in the range
+ * SCIF_PORT_RSVD + 1 to SCIF_PORT_COUNT - 1.
+ *
+ * @return : Allocated SCIF port #, or -ENOSPC if no ports available.
+ * On memory allocation failure, returns -ENOMEM.
+ */
+int scif_get_new_port(void)
+{
+ return __scif_get_port(SCIF_PORT_RSVD + 1, SCIF_PORT_COUNT);
+}
+
+/**
+ * scif_get_port - Increment the reference count for a SCIF port
+ * @id : SCIF port
+ *
+ * @return : None
+ */
+void scif_get_port(u16 id)
+{
+ struct scif_port *port;
+
+ if (!id)
+ return;
+ spin_lock(&scif_info.port_lock);
+ port = idr_find(&scif_ports, id);
+ if (port)
+ port->ref_cnt++;
+ spin_unlock(&scif_info.port_lock);
+}
+
+/**
+ * scif_put_port - Release a reserved SCIF port
+ * @id : SCIF port to be released.
+ *
+ * @return : None
+ */
+void scif_put_port(u16 id)
+{
+ struct scif_port *port;
+
+ if (!id)
+ return;
+ spin_lock(&scif_info.port_lock);
+ port = idr_find(&scif_ports, id);
+ if (port) {
+ port->ref_cnt--;
+ if (!port->ref_cnt) {
+ idr_remove(&scif_ports, id);
+ kfree(port);
+ }
+ }
+ spin_unlock(&scif_info.port_lock);
+}
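+
+/*
+ * Illustrative usage sketch (hypothetical function, compiled out via
+ * #if 0): reserving an explicit port vs. an ephemeral one, then
+ * dropping the reference when done.
+ */
+#if 0
+static int example_bind(u16 requested)
+{
+ int port;
+
+ if (requested)
+ port = scif_rsrv_port(requested); /* a specific port # */
+ else
+ port = scif_get_new_port(); /* any unreserved port */
+ if (port < 0)
+ return port; /* -ENOSPC or -ENOMEM */
+
+ /* ... scif_get_port(port) for each additional endpoint user ... */
+
+ scif_put_port(port); /* release when the last user is done */
+ return 0;
+}
+#endif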
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
new file mode 100644
index 000000000000..637cc4686742
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -0,0 +1,249 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/circ_buf.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include "scif_rb.h"
+
+#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
+#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
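+
+/*
+ * Worked example (illustrative): with size = 64, write offset 10 and
+ * read offset 60, scif_rb_ring_cnt(10, 60, 64) = (10 - 60) & 63 = 14
+ * bytes are readable, and scif_rb_ring_space(10, 60, 64) =
+ * (60 - 10 - 1) & 63 = 49 bytes are writable. Count + space is always
+ * size - 1; one byte stays free so that a full ring can be told apart
+ * from an empty one.
+ */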
+
+/**
+ * scif_rb_init - Initializes the ring buffer
+ * @rb: ring buffer
+ * @read_ptr: A pointer to the read offset
+ * @write_ptr: A pointer to the write offset
+ * @rb_base: A pointer to the base of the ring buffer
+ * @size: log2 of the size of the ring buffer in bytes
+ */
+void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
+ void *rb_base, u8 size)
+{
+ rb->rb_base = rb_base;
+ rb->size = (1 << size);
+ rb->read_ptr = read_ptr;
+ rb->write_ptr = write_ptr;
+ rb->current_read_offset = *read_ptr;
+ rb->current_write_offset = *write_ptr;
+}
+
+/* Copies a message to the ring buffer -- handles the wrap around case */
+static void memcpy_torb(struct scif_rb *rb, void *header,
+ void *msg, u32 size)
+{
+ u32 size1, size2;
+
+ if (header + size >= rb->rb_base + rb->size) {
+ /* Need to call two copies if it wraps around */
+ size1 = (u32)(rb->rb_base + rb->size - header);
+ size2 = size - size1;
+ memcpy_toio((void __iomem __force *)header, msg, size1);
+ memcpy_toio((void __iomem __force *)rb->rb_base,
+ msg + size1, size2);
+ } else {
+ memcpy_toio((void __iomem __force *)header, msg, size);
+ }
+}
+
+/* Copies a message from the ring buffer -- handles the wrap around case */
+static void memcpy_fromrb(struct scif_rb *rb, void *header,
+ void *msg, u32 size)
+{
+ u32 size1, size2;
+
+ if (header + size >= rb->rb_base + rb->size) {
+ /* Need to call two copies if it wraps around */
+ size1 = (u32)(rb->rb_base + rb->size - header);
+ size2 = size - size1;
+ memcpy_fromio(msg, (void __iomem __force *)header, size1);
+ memcpy_fromio(msg + size1,
+ (void __iomem __force *)rb->rb_base, size2);
+ } else {
+ memcpy_fromio(msg, (void __iomem __force *)header, size);
+ }
+}
+
+/**
+ * scif_rb_space - Query space available for writing to the RB
+ * @rb: ring buffer
+ *
+ * Return: size available for writing to RB in bytes.
+ */
+u32 scif_rb_space(struct scif_rb *rb)
+{
+ rb->current_read_offset = *rb->read_ptr;
+ /*
+ * Update from the HW read pointer only once the peer has exposed the
+ * new empty slot. This barrier is paired with the memory barrier in
+ * scif_rb_update_read_ptr().
+ */
+ mb();
+ return scif_rb_ring_space(rb->current_write_offset,
+ rb->current_read_offset, rb->size);
+}
+
+/**
+ * scif_rb_write - Write a message to the RB
+ * @rb: ring buffer
+ * @msg: buffer to send the message. Must be at least size bytes long
+ * @size: the size (in bytes) to be copied to the RB
+ *
+ * This API does not block if there isn't enough space in the RB.
+ * Returns: 0 on success or -ENOMEM on failure
+ */
+int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
+{
+ void *header;
+
+ if (scif_rb_space(rb) < size)
+ return -ENOMEM;
+ header = rb->rb_base + rb->current_write_offset;
+ memcpy_torb(rb, header, msg, size);
+ /*
+ * Nothing is visible to the peer until scif_rb_commit(): only the
+ * cached local write offset is updated here, not the shared pointer.
+ */
+ rb->current_write_offset =
+ (rb->current_write_offset + size) & (rb->size - 1);
+ return 0;
+}
+
+/**
+ * scif_rb_commit - Submit the message so that the peer can fetch it
+ * @rb: ring buffer
+ */
+void scif_rb_commit(struct scif_rb *rb)
+{
+ /*
+ * We must ensure ordering between all the data committed
+ * previously before we expose the new message to the peer by
+ * updating the write_ptr. This write barrier is paired with
+ * the read barrier in scif_rb_count(..)
+ */
+ wmb();
+ ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+#ifdef CONFIG_INTEL_MIC_CARD
+ /*
+ * X100 Si bug: For the case where a Core is performing an EXT_WR
+ * followed by a Doorbell Write, the Core must perform two EXT_WR to the
+ * same address with the same data before it does the Doorbell Write.
+ * This way, if ordering is violated for the Interrupt Message, it will
+ * fall just behind the first Posted associated with the first EXT_WR.
+ */
+ ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+#endif
+}
+
+/**
+ * scif_rb_get - Get the next message from the ring buffer
+ * @rb: ring buffer
+ * @size: Number of bytes to be read
+ *
+ * Return: NULL if no bytes to be read from the ring buffer, otherwise the
+ * pointer to the next byte
+ */
+static void *scif_rb_get(struct scif_rb *rb, u32 size)
+{
+ void *header = NULL;
+
+ if (scif_rb_count(rb, size) >= size)
+ header = rb->rb_base + rb->current_read_offset;
+ return header;
+}
+
+/*
+ * scif_rb_get_next - Read from ring buffer.
+ * @rb: ring buffer
+ * @msg: buffer to hold the message. Must be at least size bytes long
+ * @size: Number of bytes to be read
+ *
+ * Return: number of bytes read if available bytes are >= size, otherwise
+ * returns zero.
+ */
+u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
+{
+ void *header = NULL;
+ int read_size = 0;
+
+ header = scif_rb_get(rb, size);
+ if (header) {
+ u32 next_cmd_offset =
+ (rb->current_read_offset + size) & (rb->size - 1);
+
+ read_size = size;
+ rb->current_read_offset = next_cmd_offset;
+ memcpy_fromrb(rb, header, msg, size);
+ }
+ return read_size;
+}
+
+/**
+ * scif_rb_update_read_ptr - Expose the updated read offset to the peer
+ * @rb: ring buffer
+ */
+void scif_rb_update_read_ptr(struct scif_rb *rb)
+{
+ u32 new_offset;
+
+ new_offset = rb->current_read_offset;
+ /*
+ * We must ensure ordering between all the data committed or read
+ * previously before we expose the empty slot to the peer by updating
+ * the read_ptr. This barrier is paired with the memory barrier in
+ * scif_rb_space(..)
+ */
+ mb();
+ ACCESS_ONCE(*rb->read_ptr) = new_offset;
+#ifdef CONFIG_INTEL_MIC_CARD
+ /*
+ * X100 Si Bug: For the case where a Core is performing an EXT_WR
+ * followed by a Doorbell Write, the Core must perform two EXT_WR to the
+ * same address with the same data before it does the Doorbell Write.
+ * This way, if ordering is violated for the Interrupt Message, it will
+ * fall just behind the first Posted associated with the first EXT_WR.
+ */
+ ACCESS_ONCE(*rb->read_ptr) = new_offset;
+#endif
+}
+
+/**
+ * scif_rb_count - Query the number of bytes that can be read
+ * @rb: ring buffer
+ * @size: Number of bytes expected to be read
+ *
+ * Return: number of bytes that can be read from the RB
+ */
+u32 scif_rb_count(struct scif_rb *rb, u32 size)
+{
+ if (scif_rb_ring_cnt(rb->current_write_offset,
+ rb->current_read_offset,
+ rb->size) < size) {
+ rb->current_write_offset = *rb->write_ptr;
+ /*
+ * Update from the HW write pointer only once the peer has
+ * exposed the new message. This read barrier is paired
+ * with the write barrier in scif_rb_commit(..)
+ */
+ smp_rmb();
+ }
+ return scif_rb_ring_cnt(rb->current_write_offset,
+ rb->current_read_offset,
+ rb->size);
+}
diff --git a/drivers/misc/mic/scif/scif_rb.h b/drivers/misc/mic/scif/scif_rb.h
new file mode 100644
index 000000000000..166dffe3093d
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rb.h
@@ -0,0 +1,100 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ */
+#ifndef SCIF_RB_H
+#define SCIF_RB_H
+/*
+ * This file describes a general purpose, byte based ring buffer. Writers to the
+ * ring buffer need to synchronize using a lock. The same is true for readers,
+ * although in practice, the ring buffer has a single reader. It is lockless
+ * between producer and consumer so it can handle being used across the PCIe
+ * bus. The ring buffer ensures that there are no reads across the PCIe bus for
+ * performance reasons. Two of these are used to form a single bidirectional
+ * queue-pair across PCIe.
+ */
+/*
+ * struct scif_rb - SCIF Ring Buffer
+ *
+ * @rb_base: The base of the memory used for storing RB messages
+ * @read_ptr: Pointer to the read offset
+ * @write_ptr: Pointer to the write offset
+ * @size: Size of the memory in rb_base
+ * @current_read_offset: Cached read offset for performance
+ * @current_write_offset: Cached write offset for performance
+ */
+struct scif_rb {
+ void *rb_base;
+ u32 *read_ptr;
+ u32 *write_ptr;
+ u32 size;
+ u32 current_read_offset;
+ u32 current_write_offset;
+};
+
+/* methods used by both */
+void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
+ void *rb_base, u8 size);
+/* writer only methods */
+/* write a new command, then scif_rb_commit() */
+int scif_rb_write(struct scif_rb *rb, void *msg, u32 size);
+/* after write(), then scif_rb_commit() */
+void scif_rb_commit(struct scif_rb *rb);
+/* query space available for writing to a RB. */
+u32 scif_rb_space(struct scif_rb *rb);
+
+/* reader only methods */
+/* read a new message from the ring buffer of size bytes */
+u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size);
+/* update the read pointer so that the space can be reused */
+void scif_rb_update_read_ptr(struct scif_rb *rb);
+/* count the number of bytes that can be read */
+u32 scif_rb_count(struct scif_rb *rb, u32 size);
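+
+/*
+ * Illustrative producer/consumer sketch (hypothetical functions,
+ * compiled out via #if 0), pairing up the writer and reader halves of
+ * the API. Serializing concurrent writers (or readers) is the
+ * caller's job.
+ */
+#if 0
+static int example_send(struct scif_rb *rb, void *msg, u32 size)
+{
+ int err = scif_rb_write(rb, msg, size);
+
+ if (!err)
+ scif_rb_commit(rb); /* expose the message to the peer */
+ return err;
+}
+
+static void example_recv(struct scif_rb *rb, void *msg, u32 size)
+{
+ while (scif_rb_get_next(rb, msg, size) == size) {
+ /* ... handle the message in msg ... */
+ scif_rb_update_read_ptr(rb); /* let the peer reuse the space */
+ }
+}
+#endif
+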
+#endif
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index eeaaf5fca105..15c33cc34a80 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -18,23 +18,20 @@
* MA 02110-1301, USA.
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
-#include <linux/err.h>
+#include <linux/genalloc.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/list.h>
#include <linux/list_sort.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/genalloc.h>
#define SRAM_GRANULARITY 32
struct sram_dev {
+ struct device *dev;
+ void __iomem *virt_base;
+
struct gen_pool *pool;
struct clk *clk;
};
@@ -54,62 +51,27 @@ static int sram_reserve_cmp(void *priv, struct list_head *a,
return ra->start - rb->start;
}
-static int sram_probe(struct platform_device *pdev)
+static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
- void __iomem *virt_base;
- struct sram_dev *sram;
- struct resource *res;
- struct device_node *np = pdev->dev.of_node, *child;
+ struct device_node *np = sram->dev->of_node, *child;
unsigned long size, cur_start, cur_size;
struct sram_reserve *rblocks, *block;
struct list_head reserve_list;
unsigned int nblocks;
- int ret;
+ int ret = 0;
INIT_LIST_HEAD(&reserve_list);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "found no memory resource\n");
- return -EINVAL;
- }
-
size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev,
- res->start, size, pdev->name)) {
- dev_err(&pdev->dev, "could not request region for resource\n");
- return -EBUSY;
- }
-
- virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
- if (IS_ERR(virt_base))
- return PTR_ERR(virt_base);
-
- sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
- if (!sram)
- return -ENOMEM;
-
- sram->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sram->clk))
- sram->clk = NULL;
- else
- clk_prepare_enable(sram->clk);
-
- sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1);
- if (!sram->pool)
- return -ENOMEM;
-
/*
* We need an additional block to mark the end of the memory region
* after the reserved blocks from the dt are processed.
*/
nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
- if (!rblocks) {
- ret = -ENOMEM;
- goto err_alloc;
- }
+ if (!rblocks)
+ return -ENOMEM;
block = &rblocks[0];
for_each_available_child_of_node(np, child) {
@@ -117,17 +79,19 @@ static int sram_probe(struct platform_device *pdev)
ret = of_address_to_resource(child, 0, &child_res);
if (ret < 0) {
- dev_err(&pdev->dev,
+ dev_err(sram->dev,
"could not get address for node %s\n",
child->full_name);
+ of_node_put(child);
goto err_chunks;
}
if (child_res.start < res->start || child_res.end > res->end) {
- dev_err(&pdev->dev,
+ dev_err(sram->dev,
"reserved block %s outside the sram area\n",
child->full_name);
ret = -EINVAL;
+ of_node_put(child);
goto err_chunks;
}
@@ -135,9 +99,8 @@ static int sram_probe(struct platform_device *pdev)
block->size = resource_size(&child_res);
list_add_tail(&block->list, &reserve_list);
- dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n",
- block->start,
- block->start + block->size);
+ dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
+ block->start, block->start + block->size);
block++;
}
@@ -154,7 +117,7 @@ static int sram_probe(struct platform_device *pdev)
list_for_each_entry(block, &reserve_list, list) {
/* can only happen if sections overlap */
if (block->start < cur_start) {
- dev_err(&pdev->dev,
+ dev_err(sram->dev,
"block at 0x%x starts after current offset 0x%lx\n",
block->start, cur_start);
ret = -EINVAL;
@@ -174,10 +137,11 @@ static int sram_probe(struct platform_device *pdev)
*/
cur_size = block->start - cur_start;
- dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n",
+ dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
cur_start, cur_start + cur_size);
+
ret = gen_pool_add_virt(sram->pool,
- (unsigned long)virt_base + cur_start,
+ (unsigned long)sram->virt_base + cur_start,
res->start + cur_start, cur_size, -1);
if (ret < 0)
goto err_chunks;
@@ -186,20 +150,63 @@ static int sram_probe(struct platform_device *pdev)
cur_start = block->start + block->size;
}
+ err_chunks:
kfree(rblocks);
+ return ret;
+}
+
+static int sram_probe(struct platform_device *pdev)
+{
+ struct sram_dev *sram;
+ struct resource *res;
+ size_t size;
+ int ret;
+
+ sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
+ if (!sram)
+ return -ENOMEM;
+
+ sram->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(sram->dev, "found no memory resource\n");
+ return -EINVAL;
+ }
+
+ size = resource_size(res);
+
+ if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
+ dev_err(sram->dev, "could not request region for resource\n");
+ return -EBUSY;
+ }
+
+ sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
+ if (IS_ERR(sram->virt_base))
+ return PTR_ERR(sram->virt_base);
+
+ sram->pool = devm_gen_pool_create(sram->dev,
+ ilog2(SRAM_GRANULARITY), -1);
+ if (!sram->pool)
+ return -ENOMEM;
+
+ ret = sram_reserve_regions(sram, res);
+ if (ret)
+ return ret;
+
+ sram->clk = devm_clk_get(sram->dev, NULL);
+ if (IS_ERR(sram->clk))
+ sram->clk = NULL;
+ else
+ clk_prepare_enable(sram->clk);
+
platform_set_drvdata(pdev, sram);
- dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);
+ dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
+ gen_pool_size(sram->pool) / 1024, sram->virt_base);
return 0;
-
-err_chunks:
- kfree(rblocks);
-err_alloc:
- if (sram->clk)
- clk_disable_unprepare(sram->clk);
- return ret;
}
static int sram_remove(struct platform_device *pdev)
@@ -207,7 +214,7 @@ static int sram_remove(struct platform_device *pdev)
struct sram_dev *sram = platform_get_drvdata(pdev);
if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
- dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
+ dev_err(sram->dev, "removed while SRAM allocated\n");
if (sram->clk)
clk_disable_unprepare(sram->clk);
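For consumers, the refactor above changes nothing about the contract: the
driver still exports the on-chip memory as a genalloc pool with 32-byte
granularity (SRAM_GRANULARITY), minus any reserved ranges described in the
device tree. A minimal client sketch, assuming the client node carries a
"sram" phandle and that the of_gen_pool_get() lookup helper is available in
this tree (demo_sram_alloc is a hypothetical name; gen_pool_alloc() and
friends are real genalloc API):

#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/of.h>

/* Carve len bytes out of the SRAM pool referenced by a "sram" phandle. */
static void *demo_sram_alloc(struct device *dev, size_t len)
{
	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);
	unsigned long vaddr;

	if (!pool)
		return NULL;	/* no phandle, or provider not probed yet */

	/* allocations are rounded up to 32-byte granules by the pool */
	vaddr = gen_pool_alloc(pool, len);
	return vaddr ? (void *)vaddr : NULL;
}

Freeing goes back through gen_pool_free(pool, (unsigned long)buf, len), which
is what makes the gen_pool_avail() check in sram_remove() above meaningful.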
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 18e7a03985d4..5027b8ffae43 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -752,9 +752,8 @@ static struct ti_st_plat_data *get_platform_data(struct device *dev)
int len;
dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
-
if (!dt_pdata)
- pr_err("Can't allocate device_tree platform data\n");
+ return NULL;
dt_property = of_get_property(np, "dev_name", &len);
if (dt_property)