Diffstat (limited to 'drivers/firewire/ohci.c')
-rw-r--r--  drivers/firewire/ohci.c | 701
1 file changed, 548 insertions, 153 deletions
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9f627e758cfc..7f03540cabe8 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -18,6 +18,7 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ +#include <linux/bug.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/device.h> @@ -32,11 +33,13 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> +#include <linux/time.h> #include <asm/byteorder.h> #include <asm/page.h> @@ -170,6 +173,10 @@ struct fw_ohci { int generation; int request_generation; /* for timestamping incoming requests */ unsigned quirks; + unsigned int pri_req_max; + u32 bus_time; + bool is_root; + bool csr_state_setclear_abdicate; /* * Spinlock for accessing fw_ohci data. Never call out of @@ -177,16 +184,20 @@ struct fw_ohci { */ spinlock_t lock; + struct mutex phy_reg_mutex; + struct ar_context ar_request_ctx; struct ar_context ar_response_ctx; struct context at_request_ctx; struct context at_response_ctx; - u32 it_context_mask; + u32 it_context_mask; /* unoccupied IT contexts */ struct iso_context *it_context_list; - u64 ir_context_channels; - u32 ir_context_mask; + u64 ir_context_channels; /* unoccupied channels */ + u32 ir_context_mask; /* unoccupied IR contexts */ struct iso_context *ir_context_list; + u64 mc_channels; /* channels in use by the multichannel IR context */ + bool mc_allocated; __be32 *config_rom; dma_addr_t config_rom_bus; @@ -231,12 +242,14 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) static char ohci_driver_name[] = KBUILD_MODNAME; +#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 #define QUIRK_CYCLE_TIMER 1 #define QUIRK_RESET_PACKET 2 #define QUIRK_BE_HEADERS 4 #define QUIRK_NO_1394A 8 +#define QUIRK_NO_MSI 16 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { @@ -247,6 +260,7 @@ static const struct { QUIRK_NO_1394A}, {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, + {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, @@ -260,6 +274,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) + ", disable MSI = " __stringify(QUIRK_NO_MSI) ")"); #define OHCI_PARAM_DEBUG_AT_AR 1 @@ -288,7 +303,7 @@ static void log_irqs(u32 evt) !(evt & OHCI1394_busReset)) return; - fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, + fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, evt & OHCI1394_selfIDComplete ? " selfID" : "", evt & OHCI1394_RQPkt ? " AR_req" : "", evt & OHCI1394_RSPkt ? " AR_resp" : "", @@ -298,6 +313,7 @@ static void log_irqs(u32 evt) evt & OHCI1394_isochTx ? " IT" : "", evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", + evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", evt & OHCI1394_regAccessFail ? 
" regAccessFail" : "", evt & OHCI1394_busReset ? " busReset" : "", @@ -305,7 +321,8 @@ static void log_irqs(u32 evt) OHCI1394_RSPkt | OHCI1394_reqTxComplete | OHCI1394_respTxComplete | OHCI1394_isochRx | OHCI1394_isochTx | OHCI1394_postedWriteErr | - OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent | + OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | + OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | OHCI1394_busReset) ? " ?" : ""); } @@ -470,12 +487,17 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr) int i; reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); - for (i = 0; i < 10; i++) { + for (i = 0; i < 3 + 100; i++) { val = reg_read(ohci, OHCI1394_PhyControl); if (val & OHCI1394_PhyControl_ReadDone) return OHCI1394_PhyControl_ReadData(val); - msleep(1); + /* + * Try a few times without waiting. Sleeping is necessary + * only when the link/PHY interface is busy. + */ + if (i >= 3) + msleep(1); } fw_error("failed to read phy reg\n"); @@ -488,25 +510,23 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Write(addr, val)); - for (i = 0; i < 100; i++) { + for (i = 0; i < 3 + 100; i++) { val = reg_read(ohci, OHCI1394_PhyControl); if (!(val & OHCI1394_PhyControl_WritePending)) return 0; - msleep(1); + if (i >= 3) + msleep(1); } fw_error("failed to write phy reg\n"); return -EBUSY; } -static int ohci_update_phy_reg(struct fw_card *card, int addr, - int clear_bits, int set_bits) +static int update_phy_reg(struct fw_ohci *ohci, int addr, + int clear_bits, int set_bits) { - struct fw_ohci *ohci = fw_ohci(card); - int ret; - - ret = read_phy_reg(ohci, addr); + int ret = read_phy_reg(ohci, addr); if (ret < 0) return ret; @@ -524,13 +544,38 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) { int ret; - ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5); + ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); if (ret < 0) return ret; return read_phy_reg(ohci, addr); } +static int ohci_read_phy_reg(struct fw_card *card, int addr) +{ + struct fw_ohci *ohci = fw_ohci(card); + int ret; + + mutex_lock(&ohci->phy_reg_mutex); + ret = read_phy_reg(ohci, addr); + mutex_unlock(&ohci->phy_reg_mutex); + + return ret; +} + +static int ohci_update_phy_reg(struct fw_card *card, int addr, + int clear_bits, int set_bits) +{ + struct fw_ohci *ohci = fw_ohci(card); + int ret; + + mutex_lock(&ohci->phy_reg_mutex); + ret = update_phy_reg(ohci, addr, clear_bits, set_bits); + mutex_unlock(&ohci->phy_reg_mutex); + + return ret; +} + static int ar_context_add_page(struct ar_context *ctx) { struct device *dev = ctx->ohci->card.device; @@ -553,6 +598,7 @@ static int ar_context_add_page(struct ar_context *ctx) ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); ab->descriptor.branch_address = 0; + wmb(); /* finish init of new descriptors before branch_address update */ ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); ctx->last_buffer->next = ab; ctx->last_buffer = ab; @@ -940,6 +986,8 @@ static void context_append(struct context *ctx, d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); desc->used += (z + extra) * sizeof(*d); + + wmb(); /* finish init of new descriptors before branch_address update */ ctx->prev->branch_address = cpu_to_le32(d_bus | z); ctx->prev = find_branch_descriptor(d, z); @@ -1026,6 +1074,9 @@ static int at_context_queue_packet(struct context *ctx, header[1] = cpu_to_le32(packet->header[0]); header[2] = 
cpu_to_le32(packet->header[1]); d[0].req_count = cpu_to_le16(12); + + if (is_ping_packet(packet->header)) + d[0].control |= cpu_to_le16(DESCRIPTOR_PING); break; case 4: @@ -1311,6 +1362,78 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet) } +static u32 cycle_timer_ticks(u32 cycle_timer) +{ + u32 ticks; + + ticks = cycle_timer & 0xfff; + ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); + ticks += (3072 * 8000) * (cycle_timer >> 25); + + return ticks; +} + +/* + * Some controllers exhibit one or more of the following bugs when updating the + * iso cycle timer register: + * - When the lowest six bits are wrapping around to zero, a read that happens + * at the same time will return garbage in the lowest ten bits. + * - When the cycleOffset field wraps around to zero, the cycleCount field is + * not incremented for about 60 ns. + * - Occasionally, the entire register reads zero. + * + * To catch these, we read the register three times and ensure that the + * difference between each two consecutive reads is approximately the same, i.e. + * less than twice the other. Furthermore, any negative difference indicates an + * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to + * execute, so we have enough precision to compute the ratio of the differences.) + */ +static u32 get_cycle_time(struct fw_ohci *ohci) +{ + u32 c0, c1, c2; + u32 t0, t1, t2; + s32 diff01, diff12; + int i; + + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + + if (ohci->quirks & QUIRK_CYCLE_TIMER) { + i = 0; + c1 = c2; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + do { + c0 = c1; + c1 = c2; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + t0 = cycle_timer_ticks(c0); + t1 = cycle_timer_ticks(c1); + t2 = cycle_timer_ticks(c2); + diff01 = t1 - t0; + diff12 = t2 - t1; + } while ((diff01 <= 0 || diff12 <= 0 || + diff01 / diff12 >= 2 || diff12 / diff01 >= 2) + && i++ < 20); + } + + return c2; +} + +/* + * This function has to be called at least every 64 seconds. The bus_time + * field stores not only the upper 25 bits of the BUS_TIME register but also + * the most significant bit of the cycle timer in bit 6 so that we can detect + * changes in this bit. 
+ */ +static u32 update_bus_time(struct fw_ohci *ohci) +{ + u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; + + if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) + ohci->bus_time += 0x40; + + return ohci->bus_time | cycle_time_seconds; +} + static void bus_reset_tasklet(unsigned long data) { struct fw_ohci *ohci = (struct fw_ohci *)data; @@ -1319,6 +1442,7 @@ static void bus_reset_tasklet(unsigned long data) unsigned long flags; void *free_rom = NULL; dma_addr_t free_rom_bus = 0; + bool is_new_root; reg = reg_read(ohci, OHCI1394_NodeID); if (!(reg & OHCI1394_NodeID_idValid)) { @@ -1332,6 +1456,12 @@ static void bus_reset_tasklet(unsigned long data) ohci->node_id = reg & (OHCI1394_NodeID_busNumber | OHCI1394_NodeID_nodeNumber); + is_new_root = (reg & OHCI1394_NodeID_root) != 0; + if (!(ohci->is_root && is_new_root)) + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_cycleMaster); + ohci->is_root = is_new_root; + reg = reg_read(ohci, OHCI1394_SelfIDCount); if (reg & OHCI1394_SelfIDCount_selfIDError) { fw_notify("inconsistent self IDs\n"); @@ -1439,7 +1569,9 @@ static void bus_reset_tasklet(unsigned long data) self_id_count, ohci->self_id_buffer); fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, - self_id_count, ohci->self_id_buffer); + self_id_count, ohci->self_id_buffer, + ohci->csr_state_setclear_abdicate); + ohci->csr_state_setclear_abdicate = false; } static irqreturn_t irq_handler(int irq, void *data) @@ -1515,6 +1647,12 @@ static irqreturn_t irq_handler(int irq, void *data) fw_notify("isochronous cycle inconsistent\n"); } + if (event & OHCI1394_cycle64Seconds) { + spin_lock(&ohci->lock); + update_bus_time(ohci); + spin_unlock(&ohci->lock); + } + return IRQ_HANDLED; } @@ -1577,7 +1715,7 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci) clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; set = 0; } - ret = ohci_update_phy_reg(&ohci->card, 5, clear, set); + ret = update_phy_reg(ohci, 5, clear, set); if (ret < 0) return ret; @@ -1599,7 +1737,7 @@ static int ohci_enable(struct fw_card *card, { struct fw_ohci *ohci = fw_ohci(card); struct pci_dev *dev = to_pci_dev(card->device); - u32 lps; + u32 lps, seconds, version, irqs; int i, ret; if (software_reset(ohci)) { @@ -1635,17 +1773,34 @@ static int ohci_enable(struct fw_card *card, OHCI1394_HCControl_noByteSwapData); reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); - reg_write(ohci, OHCI1394_LinkControlClear, - OHCI1394_LinkControl_rcvPhyPkt); reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_rcvSelfID | + OHCI1394_LinkControl_rcvPhyPkt | OHCI1394_LinkControl_cycleTimerEnable | OHCI1394_LinkControl_cycleMaster); reg_write(ohci, OHCI1394_ATRetries, OHCI1394_MAX_AT_REQ_RETRIES | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | - (OHCI1394_MAX_PHYS_RESP_RETRIES << 8)); + (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | + (200 << 16)); + + seconds = lower_32_bits(get_seconds()); + reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); + ohci->bus_time = seconds & ~0x3f; + + version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; + if (version >= OHCI_VERSION_1_1) { + reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, + 0xfffffffe); + card->broadcast_channel_auto_allocated = true; + } + + /* Get implemented bits of the priority arbitration request counter. 
*/ + reg_write(ohci, OHCI1394_FairnessControl, 0x3f); + ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; + reg_write(ohci, OHCI1394_FairnessControl, 0); + card->priority_budget_implemented = ohci->pri_req_max != 0; ar_context_run(&ohci->ar_request_ctx); ar_context_run(&ohci->ar_response_ctx); @@ -1653,16 +1808,6 @@ static int ohci_enable(struct fw_card *card, reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); reg_write(ohci, OHCI1394_IntEventClear, ~0); reg_write(ohci, OHCI1394_IntMaskClear, ~0); - reg_write(ohci, OHCI1394_IntMaskSet, - OHCI1394_selfIDComplete | - OHCI1394_RQPkt | OHCI1394_RSPkt | - OHCI1394_reqTxComplete | OHCI1394_respTxComplete | - OHCI1394_isochRx | OHCI1394_isochTx | - OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | - OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | - OHCI1394_masterIntEnable); - if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) - reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); ret = configure_1394a_enhancements(ohci); if (ret < 0) @@ -1719,26 +1864,38 @@ static int ohci_enable(struct fw_card *card, reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); + if (!(ohci->quirks & QUIRK_NO_MSI)) + pci_enable_msi(dev); if (request_irq(dev->irq, irq_handler, - IRQF_SHARED, ohci_driver_name, ohci)) { - fw_error("Failed to allocate shared interrupt %d.\n", - dev->irq); + pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, + ohci_driver_name, ohci)) { + fw_error("Failed to allocate interrupt %d.\n", dev->irq); + pci_disable_msi(dev); dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, ohci->config_rom, ohci->config_rom_bus); return -EIO; } + irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | + OHCI1394_RQPkt | OHCI1394_RSPkt | + OHCI1394_isochTx | OHCI1394_isochRx | + OHCI1394_postedWriteErr | + OHCI1394_selfIDComplete | + OHCI1394_regAccessFail | + OHCI1394_cycle64Seconds | + OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong | + OHCI1394_masterIntEnable; + if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) + irqs |= OHCI1394_busReset; + reg_write(ohci, OHCI1394_IntMaskSet, irqs); + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable | OHCI1394_HCControl_BIBimageValid); flush_writes(ohci); - /* - * We are ready to go, initiate bus reset to finish the - * initialization. - */ - - fw_core_initiate_bus_reset(&ohci->card, 1); + /* We are ready to go, reset bus to finish initialization. */ + fw_schedule_bus_reset(&ohci->card, false, true); return 0; } @@ -1813,7 +1970,7 @@ static int ohci_set_config_rom(struct fw_card *card, * takes effect. 
*/ if (ret == 0) - fw_core_initiate_bus_reset(&ohci->card, 1); + fw_schedule_bus_reset(&ohci->card, true, true); else dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom, next_config_rom_bus); @@ -1903,61 +2060,117 @@ static int ohci_enable_phys_dma(struct fw_card *card, #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ } -static u32 cycle_timer_ticks(u32 cycle_timer) +static u32 ohci_read_csr(struct fw_card *card, int csr_offset) { - u32 ticks; + struct fw_ohci *ohci = fw_ohci(card); + unsigned long flags; + u32 value; + + switch (csr_offset) { + case CSR_STATE_CLEAR: + case CSR_STATE_SET: + if (ohci->is_root && + (reg_read(ohci, OHCI1394_LinkControlSet) & + OHCI1394_LinkControl_cycleMaster)) + value = CSR_STATE_BIT_CMSTR; + else + value = 0; + if (ohci->csr_state_setclear_abdicate) + value |= CSR_STATE_BIT_ABDICATE; - ticks = cycle_timer & 0xfff; - ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); - ticks += (3072 * 8000) * (cycle_timer >> 25); + return value; - return ticks; + case CSR_NODE_IDS: + return reg_read(ohci, OHCI1394_NodeID) << 16; + + case CSR_CYCLE_TIME: + return get_cycle_time(ohci); + + case CSR_BUS_TIME: + /* + * We might be called just after the cycle timer has wrapped + * around but just before the cycle64Seconds handler, so we + * better check here, too, if the bus time needs to be updated. + */ + spin_lock_irqsave(&ohci->lock, flags); + value = update_bus_time(ohci); + spin_unlock_irqrestore(&ohci->lock, flags); + return value; + + case CSR_BUSY_TIMEOUT: + value = reg_read(ohci, OHCI1394_ATRetries); + return (value >> 4) & 0x0ffff00f; + + case CSR_PRIORITY_BUDGET: + return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | + (ohci->pri_req_max << 8); + + default: + WARN_ON(1); + return 0; + } } -/* - * Some controllers exhibit one or more of the following bugs when updating the - * iso cycle timer register: - * - When the lowest six bits are wrapping around to zero, a read that happens - * at the same time will return garbage in the lowest ten bits. - * - When the cycleOffset field wraps around to zero, the cycleCount field is - * not incremented for about 60 ns. - * - Occasionally, the entire register reads zero. - * - * To catch these, we read the register three times and ensure that the - * difference between each two consecutive reads is approximately the same, i.e. - * less than twice the other. Furthermore, any negative difference indicates an - * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to - * execute, so we have enough precision to compute the ratio of the differences.) 
- */ -static u32 ohci_get_cycle_time(struct fw_card *card) +static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) { struct fw_ohci *ohci = fw_ohci(card); - u32 c0, c1, c2; - u32 t0, t1, t2; - s32 diff01, diff12; - int i; + unsigned long flags; - c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + switch (csr_offset) { + case CSR_STATE_CLEAR: + if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { + reg_write(ohci, OHCI1394_LinkControlClear, + OHCI1394_LinkControl_cycleMaster); + flush_writes(ohci); + } + if (value & CSR_STATE_BIT_ABDICATE) + ohci->csr_state_setclear_abdicate = false; + break; - if (ohci->quirks & QUIRK_CYCLE_TIMER) { - i = 0; - c1 = c2; - c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); - do { - c0 = c1; - c1 = c2; - c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); - t0 = cycle_timer_ticks(c0); - t1 = cycle_timer_ticks(c1); - t2 = cycle_timer_ticks(c2); - diff01 = t1 - t0; - diff12 = t2 - t1; - } while ((diff01 <= 0 || diff12 <= 0 || - diff01 / diff12 >= 2 || diff12 / diff01 >= 2) - && i++ < 20); - } + case CSR_STATE_SET: + if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_cycleMaster); + flush_writes(ohci); + } + if (value & CSR_STATE_BIT_ABDICATE) + ohci->csr_state_setclear_abdicate = true; + break; - return c2; + case CSR_NODE_IDS: + reg_write(ohci, OHCI1394_NodeID, value >> 16); + flush_writes(ohci); + break; + + case CSR_CYCLE_TIME: + reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); + reg_write(ohci, OHCI1394_IntEventSet, + OHCI1394_cycleInconsistent); + flush_writes(ohci); + break; + + case CSR_BUS_TIME: + spin_lock_irqsave(&ohci->lock, flags); + ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); + spin_unlock_irqrestore(&ohci->lock, flags); + break; + + case CSR_BUSY_TIMEOUT: + value = (value & 0xf) | ((value & 0xf) << 4) | + ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); + reg_write(ohci, OHCI1394_ATRetries, value); + flush_writes(ohci); + break; + + case CSR_PRIORITY_BUDGET: + reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); + flush_writes(ohci); + break; + + default: + WARN_ON(1); + break; + } } static void copy_iso_headers(struct iso_context *ctx, void *p) @@ -1992,10 +2205,9 @@ static int handle_ir_packet_per_buffer(struct context *context, __le32 *ir_header; void *p; - for (pd = d; pd <= last; pd++) { + for (pd = d; pd <= last; pd++) if (pd->transfer_status) break; - } if (pd > last) /* Descriptor(s) not done yet, stop iteration */ return 0; @@ -2005,16 +2217,38 @@ static int handle_ir_packet_per_buffer(struct context *context, if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { ir_header = (__le32 *) p; - ctx->base.callback(&ctx->base, - le32_to_cpu(ir_header[0]) & 0xffff, - ctx->header_length, ctx->header, - ctx->base.callback_data); + ctx->base.callback.sc(&ctx->base, + le32_to_cpu(ir_header[0]) & 0xffff, + ctx->header_length, ctx->header, + ctx->base.callback_data); ctx->header_length = 0; } return 1; } +/* d == last because each descriptor block is only a single descriptor. 
*/ +static int handle_ir_buffer_fill(struct context *context, + struct descriptor *d, + struct descriptor *last) +{ + struct iso_context *ctx = + container_of(context, struct iso_context, context); + + if (!last->transfer_status) + /* Descriptor(s) not done yet, stop iteration */ + return 0; + + if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) + ctx->base.callback.mc(&ctx->base, + le32_to_cpu(last->data_address) + + le16_to_cpu(last->req_count) - + le16_to_cpu(last->res_count), + ctx->base.callback_data); + + return 1; +} + static int handle_it_packet(struct context *context, struct descriptor *d, struct descriptor *last) @@ -2040,71 +2274,118 @@ static int handle_it_packet(struct context *context, ctx->header_length += 4; } if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { - ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), - ctx->header_length, ctx->header, - ctx->base.callback_data); + ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), + ctx->header_length, ctx->header, + ctx->base.callback_data); ctx->header_length = 0; } return 1; } +static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) +{ + u32 hi = channels >> 32, lo = channels; + + reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); + reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); + reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); + reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); + mmiowb(); + ohci->mc_channels = channels; +} + static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, int type, int channel, size_t header_size) { struct fw_ohci *ohci = fw_ohci(card); - struct iso_context *ctx, *list; - descriptor_callback_t callback; - u64 *channels, dont_care = ~0ULL; - u32 *mask, regs; + struct iso_context *uninitialized_var(ctx); + descriptor_callback_t uninitialized_var(callback); + u64 *uninitialized_var(channels); + u32 *uninitialized_var(mask), uninitialized_var(regs); unsigned long flags; - int index, ret = -ENOMEM; + int index, ret = -EBUSY; + + spin_lock_irqsave(&ohci->lock, flags); - if (type == FW_ISO_CONTEXT_TRANSMIT) { - channels = &dont_care; - mask = &ohci->it_context_mask; - list = ohci->it_context_list; + switch (type) { + case FW_ISO_CONTEXT_TRANSMIT: + mask = &ohci->it_context_mask; callback = handle_it_packet; - } else { + index = ffs(*mask) - 1; + if (index >= 0) { + *mask &= ~(1 << index); + regs = OHCI1394_IsoXmitContextBase(index); + ctx = &ohci->it_context_list[index]; + } + break; + + case FW_ISO_CONTEXT_RECEIVE: channels = &ohci->ir_context_channels; - mask = &ohci->ir_context_mask; - list = ohci->ir_context_list; + mask = &ohci->ir_context_mask; callback = handle_ir_packet_per_buffer; - } + index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; + if (index >= 0) { + *channels &= ~(1ULL << channel); + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; - spin_lock_irqsave(&ohci->lock, flags); - index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; - if (index >= 0) { - *channels &= ~(1ULL << channel); - *mask &= ~(1 << index); + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + mask = &ohci->ir_context_mask; + callback = handle_ir_buffer_fill; + index = !ohci->mc_allocated ? 
ffs(*mask) - 1 : -1; + if (index >= 0) { + ohci->mc_allocated = true; + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; + + default: + index = -1; + ret = -ENOSYS; } + spin_unlock_irqrestore(&ohci->lock, flags); if (index < 0) - return ERR_PTR(-EBUSY); - - if (type == FW_ISO_CONTEXT_TRANSMIT) - regs = OHCI1394_IsoXmitContextBase(index); - else - regs = OHCI1394_IsoRcvContextBase(index); + return ERR_PTR(ret); - ctx = &list[index]; memset(ctx, 0, sizeof(*ctx)); ctx->header_length = 0; ctx->header = (void *) __get_free_page(GFP_KERNEL); - if (ctx->header == NULL) + if (ctx->header == NULL) { + ret = -ENOMEM; goto out; - + } ret = context_init(&ctx->context, ohci, regs, callback); if (ret < 0) goto out_with_header; + if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) + set_multichannel_mask(ohci, 0); + return &ctx->base; out_with_header: free_page((unsigned long)ctx->header); out: spin_lock_irqsave(&ohci->lock, flags); + + switch (type) { + case FW_ISO_CONTEXT_RECEIVE: + *channels |= 1ULL << channel; + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + ohci->mc_allocated = false; + break; + } *mask |= 1 << index; + spin_unlock_irqrestore(&ohci->lock, flags); return ERR_PTR(ret); @@ -2115,10 +2396,11 @@ static int ohci_start_iso(struct fw_iso_context *base, { struct iso_context *ctx = container_of(base, struct iso_context, base); struct fw_ohci *ohci = ctx->context.ohci; - u32 control, match; + u32 control = IR_CONTEXT_ISOCH_HEADER, match; int index; - if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { + switch (ctx->base.type) { + case FW_ISO_CONTEXT_TRANSMIT: index = ctx - ohci->it_context_list; match = 0; if (cycle >= 0) @@ -2128,9 +2410,13 @@ static int ohci_start_iso(struct fw_iso_context *base, reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); context_run(&ctx->context, match); - } else { + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; + /* fall through */ + case FW_ISO_CONTEXT_RECEIVE: index = ctx - ohci->ir_context_list; - control = IR_CONTEXT_ISOCH_HEADER; match = (tags << 28) | (sync << 8) | ctx->base.channel; if (cycle >= 0) { match |= (cycle & 0x07fff) << 12; @@ -2141,6 +2427,7 @@ static int ohci_start_iso(struct fw_iso_context *base, reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); context_run(&ctx->context, control); + break; } return 0; @@ -2152,12 +2439,17 @@ static int ohci_stop_iso(struct fw_iso_context *base) struct iso_context *ctx = container_of(base, struct iso_context, base); int index; - if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { + switch (ctx->base.type) { + case FW_ISO_CONTEXT_TRANSMIT: index = ctx - ohci->it_context_list; reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); - } else { + break; + + case FW_ISO_CONTEXT_RECEIVE: + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: index = ctx - ohci->ir_context_list; reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); + break; } flush_writes(ohci); context_stop(&ctx->context); @@ -2178,24 +2470,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base) spin_lock_irqsave(&ohci->lock, flags); - if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { + switch (base->type) { + case FW_ISO_CONTEXT_TRANSMIT: index = ctx - ohci->it_context_list; ohci->it_context_mask |= 1 << index; - } else { + break; + + case FW_ISO_CONTEXT_RECEIVE: index 
= ctx - ohci->ir_context_list; ohci->ir_context_mask |= 1 << index; ohci->ir_context_channels |= 1ULL << base->channel; + break; + + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + index = ctx - ohci->ir_context_list; + ohci->ir_context_mask |= 1 << index; + ohci->ir_context_channels |= ohci->mc_channels; + ohci->mc_channels = 0; + ohci->mc_allocated = false; + break; } spin_unlock_irqrestore(&ohci->lock, flags); } -static int ohci_queue_iso_transmit(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) +{ + struct fw_ohci *ohci = fw_ohci(base->card); + unsigned long flags; + int ret; + + switch (base->type) { + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + + spin_lock_irqsave(&ohci->lock, flags); + + /* Don't allow multichannel to grab other contexts' channels. */ + if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { + *channels = ohci->ir_context_channels; + ret = -EBUSY; + } else { + set_multichannel_mask(ohci, *channels); + ret = 0; + } + + spin_unlock_irqrestore(&ohci->lock, flags); + + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int queue_iso_transmit(struct iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { - struct iso_context *ctx = container_of(base, struct iso_context, base); struct descriptor *d, *last, *pd; struct fw_iso_packet *p; __le32 *header; @@ -2291,14 +2624,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, return 0; } -static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int queue_iso_packet_per_buffer(struct iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { - struct iso_context *ctx = container_of(base, struct iso_context, base); struct descriptor *d, *pd; - struct fw_iso_packet *p = packet; dma_addr_t d_bus, page_bus; u32 z, header_z, rest; int i, j, length; @@ -2308,14 +2639,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, * The OHCI controller puts the isochronous header and trailer in the * buffer, so we need at least 8 bytes. */ - packet_count = p->header_length / ctx->base.header_size; + packet_count = packet->header_length / ctx->base.header_size; header_size = max(ctx->base.header_size, (size_t)8); /* Get header size in number of descriptors. 
*/ header_z = DIV_ROUND_UP(header_size, sizeof(*d)); page = payload >> PAGE_SHIFT; offset = payload & ~PAGE_MASK; - payload_per_buffer = p->payload_length / packet_count; + payload_per_buffer = packet->payload_length / packet_count; for (i = 0; i < packet_count; i++) { /* d points to the header descriptor */ @@ -2327,7 +2658,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, d->control = cpu_to_le16(DESCRIPTOR_STATUS | DESCRIPTOR_INPUT_MORE); - if (p->skip && i == 0) + if (packet->skip && i == 0) d->control |= cpu_to_le16(DESCRIPTOR_WAIT); d->req_count = cpu_to_le16(header_size); d->res_count = d->req_count; @@ -2360,7 +2691,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, pd->control = cpu_to_le16(DESCRIPTOR_STATUS | DESCRIPTOR_INPUT_LAST | DESCRIPTOR_BRANCH_ALWAYS); - if (p->interrupt && i == packet_count - 1) + if (packet->interrupt && i == packet_count - 1) pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); context_append(&ctx->context, d, z, header_z); @@ -2369,6 +2700,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, return 0; } +static int queue_iso_buffer_fill(struct iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) +{ + struct descriptor *d; + dma_addr_t d_bus, page_bus; + int page, offset, rest, z, i, length; + + page = payload >> PAGE_SHIFT; + offset = payload & ~PAGE_MASK; + rest = packet->payload_length; + + /* We need one descriptor for each page in the buffer. */ + z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); + + if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) + return -EFAULT; + + for (i = 0; i < z; i++) { + d = context_get_descriptors(&ctx->context, 1, &d_bus); + if (d == NULL) + return -ENOMEM; + + d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | + DESCRIPTOR_BRANCH_ALWAYS); + if (packet->skip && i == 0) + d->control |= cpu_to_le16(DESCRIPTOR_WAIT); + if (packet->interrupt && i == z - 1) + d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); + + if (offset + rest < PAGE_SIZE) + length = rest; + else + length = PAGE_SIZE - offset; + d->req_count = cpu_to_le16(length); + d->res_count = d->req_count; + d->transfer_status = 0; + + page_bus = page_private(buffer->pages[page]); + d->data_address = cpu_to_le32(page_bus + offset); + + rest -= length; + offset = 0; + page++; + + context_append(&ctx->context, d, 1, 0); + } + + return 0; +} + static int ohci_queue_iso(struct fw_iso_context *base, struct fw_iso_packet *packet, struct fw_iso_buffer *buffer, @@ -2376,14 +2759,20 @@ static int ohci_queue_iso(struct fw_iso_context *base, { struct iso_context *ctx = container_of(base, struct iso_context, base); unsigned long flags; - int ret; + int ret = -ENOSYS; spin_lock_irqsave(&ctx->context.ohci->lock, flags); - if (base->type == FW_ISO_CONTEXT_TRANSMIT) - ret = ohci_queue_iso_transmit(base, packet, buffer, payload); - else - ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, - buffer, payload); + switch (base->type) { + case FW_ISO_CONTEXT_TRANSMIT: + ret = queue_iso_transmit(ctx, packet, buffer, payload); + break; + case FW_ISO_CONTEXT_RECEIVE: + ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); + break; + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); + break; + } spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); return ret; @@ -2391,16 +2780,19 @@ static int ohci_queue_iso(struct fw_iso_context *base, static const 
struct fw_card_driver ohci_driver = { .enable = ohci_enable, + .read_phy_reg = ohci_read_phy_reg, .update_phy_reg = ohci_update_phy_reg, .set_config_rom = ohci_set_config_rom, .send_request = ohci_send_request, .send_response = ohci_send_response, .cancel_packet = ohci_cancel_packet, .enable_phys_dma = ohci_enable_phys_dma, - .get_cycle_time = ohci_get_cycle_time, + .read_csr = ohci_read_csr, + .write_csr = ohci_write_csr, .allocate_iso_context = ohci_allocate_iso_context, .free_iso_context = ohci_free_iso_context, + .set_iso_channels = ohci_set_iso_channels, .queue_iso = ohci_queue_iso, .start_iso = ohci_start_iso, .stop_iso = ohci_stop_iso, @@ -2465,6 +2857,7 @@ static int __devinit pci_probe(struct pci_dev *dev, pci_set_drvdata(dev, ohci); spin_lock_init(&ohci->lock); + mutex_init(&ohci->phy_reg_mutex); tasklet_init(&ohci->bus_reset_tasklet, bus_reset_tasklet, (unsigned long)ohci); @@ -2625,6 +3018,7 @@ static void pci_remove(struct pci_dev *dev) context_release(&ohci->at_response_ctx); kfree(ohci->it_context_list); kfree(ohci->ir_context_list); + pci_disable_msi(dev); pci_iounmap(dev, ohci->registers); pci_release_region(dev, 0); pci_disable_device(dev); @@ -2642,6 +3036,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state) software_reset(ohci); free_irq(dev->irq, ohci); + pci_disable_msi(dev); err = pci_save_state(dev); if (err) { fw_error("pci_save_state failed\n"); |
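The get_cycle_time() workaround added by this patch (used when QUIRK_CYCLE_TIMER is set) reads OHCI1394_IsochronousCycleTimer three times, converts each value to ticks of the 24.576 MHz clock, and retries up to 20 times until both consecutive differences are positive and neither is twice the other or more. The stand-alone sketch below restates that conversion and plausibility test; the sample register values and function names are illustrative only, not part of the patch.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Convert a raw cycle-timer value into ticks of the 24.576 MHz clock,
 * same arithmetic as cycle_timer_ticks() in the patch:
 *   bits 11..0  cycleOffset (0..3071)
 *   bits 24..12 cycleCount  (0..7999)
 *   bits 31..25 cycleSeconds
 */
static uint32_t cycle_timer_ticks(uint32_t c)
{
	return (c & 0xfff) +
	       3072 * ((c >> 12) & 0x1fff) +
	       3072 * 8000 * (c >> 25);
}

/*
 * Plausibility test used by the QUIRK_CYCLE_TIMER retry loop: both
 * differences must be positive and neither may be at least twice the other.
 */
static bool reads_consistent(uint32_t c0, uint32_t c1, uint32_t c2)
{
	int32_t d01 = (int32_t)(cycle_timer_ticks(c1) - cycle_timer_ticks(c0));
	int32_t d12 = (int32_t)(cycle_timer_ticks(c2) - cycle_timer_ticks(c1));

	return d01 > 0 && d12 > 0 && d01 / d12 < 2 && d12 / d01 < 2;
}

int main(void)
{
	/* Hypothetical sample reads about 40 ticks apart, and a triple where
	 * the middle read returned 0 (one of the documented failure modes). */
	uint32_t good[3] = { 0x02064500, 0x02064528, 0x02064550 };
	uint32_t bad[3]  = { 0x02064500, 0x00000000, 0x02064550 };

	printf("good: %s\n",
	       reads_consistent(good[0], good[1], good[2]) ? "accepted" : "rejected");
	printf("bad:  %s\n",
	       reads_consistent(bad[0], bad[1], bad[2]) ? "accepted" : "rejected");
	return 0;
}
```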
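update_bus_time() extends the 7-bit cycleSeconds field into a full 32-bit bus time: ohci->bus_time keeps the upper bits (always a multiple of 0x40), and whenever bit 6 of the hardware seconds value is seen to flip, those upper bits advance by 64 — which is why the handler only has to run at least once every 64 seconds. A minimal restatement of that bookkeeping; the struct, start value, and sample sequence are made up for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Upper bits of the seconds counter, always a multiple of 0x40, with bit 6
 * mirroring the hardware seconds so a flip of that bit can be detected. */
struct bus_clock {
	uint32_t bus_time;
};

/*
 * Same logic as update_bus_time() in the patch: 'seconds7' is the 7-bit
 * cycleSeconds value (cycle timer register >> 25). Must be called at least
 * once per 64 s so no flip of bit 6 is missed.
 */
static uint32_t update_bus_time(struct bus_clock *c, uint32_t seconds7)
{
	if ((c->bus_time & 0x40) != (seconds7 & 0x40))
		c->bus_time += 0x40;

	return c->bus_time | seconds7;
}

int main(void)
{
	/* Initialization mirrors ohci_enable(): start from some wall-clock
	 * seconds value (1000 here) with the low six bits cleared; the
	 * hardware seconds counter starts at 1000 % 128 = 104. */
	struct bus_clock c = { .bus_time = 1000 & ~0x3f };

	unsigned samples[] = { 104, 126, 127, 0, 1, 63, 64 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("seconds7=%3u -> bus time %u\n", samples[i],
		       (unsigned)update_bus_time(&c, samples[i]));
	return 0;
}
```

The printed bus time stays monotonic (1000, 1022, 1023, 1024, 1025, 1087, 1088) across the wraparound of the 7-bit hardware counter.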
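For CSR_BUSY_TIMEOUT, ohci_write_csr() replicates the 4-bit retry limit into bits 0-3, 4-7 and 8-11 of OHCI1394_ATRetries and shifts the CSR's bits 12-27 up by four; ohci_read_csr() reverses the mapping with (value >> 4) & 0x0ffff00f. Below is a small round-trip check of those two mappings; the example value and the "timeout"/"retries" reading of the fields are assumptions for illustration.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Write direction, as in ohci_write_csr(CSR_BUSY_TIMEOUT): copy the 4-bit
 * retry limit into bits 0-3, 4-7 and 8-11 of ATRetries and shift the
 * remaining CSR bits (12-27) up by four. */
static uint32_t busy_timeout_to_at_retries(uint32_t csr)
{
	return (csr & 0xf) | ((csr & 0xf) << 4) | ((csr & 0xf) << 8) |
	       ((csr & 0x0ffff000) << 4);
}

/* Read direction, as in ohci_read_csr(CSR_BUSY_TIMEOUT). */
static uint32_t at_retries_to_busy_timeout(uint32_t at_retries)
{
	return (at_retries >> 4) & 0x0ffff00f;
}

int main(void)
{
	uint32_t csr = 0x00011004;	/* illustrative: timeout field 0x11, retry limit 4 */
	uint32_t reg = busy_timeout_to_at_retries(csr);

	printf("CSR 0x%08x -> ATRetries 0x%08x -> CSR 0x%08x\n",
	       (unsigned)csr, (unsigned)reg,
	       (unsigned)at_retries_to_busy_timeout(reg));
	assert(at_retries_to_busy_timeout(reg) == csr);
	return 0;
}
```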
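set_multichannel_mask() programs the IRMultiChanMask registers through their set/clear pairs: it first clears every channel bit that is not wanted, then sets every bit that is, so the resulting mask equals the requested channel set regardless of its previous contents. A toy model of that sequence, with the hi/lo register halves merged into a single 64-bit word for brevity; the register model and sample values are illustrative only.

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal model of a set/clear register pair: writing 1s to the clear side
 * clears those bits, writing 1s to the set side sets them. */
struct setclear_reg {
	uint64_t value;
};

static void reg_clear(struct setclear_reg *r, uint64_t bits) { r->value &= ~bits; }
static void reg_set(struct setclear_reg *r, uint64_t bits)   { r->value |= bits; }

/* Same sequence as set_multichannel_mask() in the patch: clear every
 * unwanted channel, then set every wanted one. */
static void set_multichannel_mask(struct setclear_reg *r, uint64_t channels)
{
	reg_clear(r, ~channels);
	reg_set(r, channels);
}

int main(void)
{
	struct setclear_reg mask = { .value = 0x00000000000000ffULL };	/* old mask */

	set_multichannel_mask(&mask, 0x0000000000030001ULL);	/* channels 0, 16, 17 */
	printf("IR multi-channel mask: 0x%016llx\n",
	       (unsigned long long)mask.value);
	return 0;
}
```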