Diffstat (limited to 'drivers/scsi/isci')
-rw-r--r--  drivers/scsi/isci/host.c                      | 742
-rw-r--r--  drivers/scsi/isci/host.h                      |  93
-rw-r--r--  drivers/scsi/isci/init.c                      |   4
-rw-r--r--  drivers/scsi/isci/isci.h                      |   6
-rw-r--r--  drivers/scsi/isci/phy.c                       | 385
-rw-r--r--  drivers/scsi/isci/phy.h                       |  70
-rw-r--r--  drivers/scsi/isci/port.c                      | 551
-rw-r--r--  drivers/scsi/isci/port.h                      |  68
-rw-r--r--  drivers/scsi/isci/port_config.c               | 132
-rw-r--r--  drivers/scsi/isci/probe_roms.c                |  16
-rw-r--r--  drivers/scsi/isci/probe_roms.h                |  38
-rw-r--r--  drivers/scsi/isci/remote_device.c             | 300
-rw-r--r--  drivers/scsi/isci/remote_device.h             |  90
-rw-r--r--  drivers/scsi/isci/remote_node_context.c       | 198
-rw-r--r--  drivers/scsi/isci/remote_node_context.h       |  30
-rw-r--r--  drivers/scsi/isci/remote_node_table.c         | 114
-rw-r--r--  drivers/scsi/isci/remote_node_table.h         |  16
-rw-r--r--  drivers/scsi/isci/request.c                   | 360
-rw-r--r--  drivers/scsi/isci/request.h                   |  52
-rw-r--r--  drivers/scsi/isci/sata.c                      |   4
-rw-r--r--  drivers/scsi/isci/task.c                      |  24
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c |  57
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.h |  42
23 files changed, 1449 insertions, 1943 deletions
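The host.c hunks below validate each task completion against its io_tag by comparing the sequence number embedded in the tag with the per-index sequence the host records (ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]), so a late completion for a recycled task context index is ignored. The following is a minimal standalone sketch of that style of stale-tag check; the field widths and the names MAKE_TAG, TAG_SEQ, TAG_TCI, struct host_tags and tag_is_current() are illustrative assumptions, not the driver's real macros.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative widths only; the driver derives the real ones from SCI_MAX_IO_REQUESTS. */
#define TCI_BITS  12
#define TCI_MASK  ((1u << TCI_BITS) - 1)
#define SEQ_MASK  0xfu

#define MAKE_TAG(seq, tci) ((uint16_t)((((seq) & SEQ_MASK) << TCI_BITS) | ((tci) & TCI_MASK)))
#define TAG_SEQ(tag)       (((tag) >> TCI_BITS) & SEQ_MASK)
#define TAG_TCI(tag)       ((tag) & TCI_MASK)

struct host_tags {
	/* bumped each time a task context index is handed out for a new request */
	uint8_t io_request_sequence[1 << TCI_BITS];
};

/*
 * A completion entry is only acted on if the sequence carried in the tag
 * still matches the host's counter for that index, i.e. the index has not
 * been reallocated since the request was posted to the hardware.
 */
static bool tag_is_current(const struct host_tags *h, uint16_t tag)
{
	return TAG_SEQ(tag) == (h->io_request_sequence[TAG_TCI(tag)] & SEQ_MASK);
}

In this sketch, recycling an index would simply increment io_request_sequence[TAG_TCI(tag)], which is the effect the isci_alloc_tag()/isci_free_tag() pair in the patch is built around.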
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index bb298f8f609a..f31f64e4b713 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -180,8 +180,7 @@ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) handler(sm); } -static bool scic_sds_controller_completion_queue_has_entries( - struct isci_host *ihost) +static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) { u32 get_value = ihost->completion_queue_get; u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; @@ -193,9 +192,9 @@ static bool scic_sds_controller_completion_queue_has_entries( return false; } -static bool scic_sds_controller_isr(struct isci_host *ihost) +static bool sci_controller_isr(struct isci_host *ihost) { - if (scic_sds_controller_completion_queue_has_entries(ihost)) { + if (sci_controller_completion_queue_has_entries(ihost)) { return true; } else { /* @@ -219,13 +218,13 @@ irqreturn_t isci_msix_isr(int vec, void *data) { struct isci_host *ihost = data; - if (scic_sds_controller_isr(ihost)) + if (sci_controller_isr(ihost)) tasklet_schedule(&ihost->completion_tasklet); return IRQ_HANDLED; } -static bool scic_sds_controller_error_isr(struct isci_host *ihost) +static bool sci_controller_error_isr(struct isci_host *ihost) { u32 interrupt_status; @@ -252,35 +251,35 @@ static bool scic_sds_controller_error_isr(struct isci_host *ihost) return false; } -static void scic_sds_controller_task_completion(struct isci_host *ihost, - u32 completion_entry) +static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) { - u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); + u32 index = SCU_GET_COMPLETION_INDEX(ent); struct isci_request *ireq = ihost->reqs[index]; /* Make sure that we really want to process this IO request */ if (test_bit(IREQ_ACTIVE, &ireq->flags) && ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) - /* Yep this is a valid io request pass it along to the io request handler */ - scic_sds_io_request_tc_completion(ireq, completion_entry); + /* Yep this is a valid io request pass it along to the + * io request handler + */ + sci_io_request_tc_completion(ireq, ent); } -static void scic_sds_controller_sdma_completion(struct isci_host *ihost, - u32 completion_entry) +static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) { u32 index; struct isci_request *ireq; struct isci_remote_device *idev; - index = SCU_GET_COMPLETION_INDEX(completion_entry); + index = SCU_GET_COMPLETION_INDEX(ent); - switch (scu_get_command_request_type(completion_entry)) { + switch (scu_get_command_request_type(ent)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: ireq = ihost->reqs[index]; dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", - __func__, completion_entry, ireq); + __func__, ent, ireq); /* @todo For a post TC operation we need to fail the IO * request */ @@ -290,20 +289,19 @@ static void scic_sds_controller_sdma_completion(struct isci_host *ihost, case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: idev = ihost->device_table[index]; dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", - __func__, completion_entry, idev); + __func__, ent, idev); /* @todo For a port RNC operation we need to fail the * device */ break; default: dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", - __func__, completion_entry); + __func__, ent); break; } } -static void 
scic_sds_controller_unsolicited_frame(struct isci_host *ihost, - u32 completion_entry) +static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) { u32 index; u32 frame_index; @@ -314,36 +312,36 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, enum sci_status result = SCI_FAILURE; - frame_index = SCU_GET_FRAME_INDEX(completion_entry); + frame_index = SCU_GET_FRAME_INDEX(ent); frame_header = ihost->uf_control.buffers.array[frame_index].header; ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; - if (SCU_GET_FRAME_ERROR(completion_entry)) { + if (SCU_GET_FRAME_ERROR(ent)) { /* * / @todo If the IAF frame or SIGNATURE FIS frame has an error will * / this cause a problem? We expect the phy initialization will * / fail if there is an error in the frame. */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return; } if (frame_header->is_address_frame) { - index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; - result = scic_sds_phy_frame_handler(iphy, frame_index); + result = sci_phy_frame_handler(iphy, frame_index); } else { - index = SCU_GET_COMPLETION_INDEX(completion_entry); + index = SCU_GET_COMPLETION_INDEX(ent); if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { /* * This is a signature fis or a frame from a direct attached SATA * device that has not yet been created. In either case forwared * the frame to the PE and let it take care of the frame data. */ - index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; - result = scic_sds_phy_frame_handler(iphy, frame_index); + result = sci_phy_frame_handler(iphy, frame_index); } else { if (index < ihost->remote_node_entries) idev = ihost->device_table[index]; @@ -351,9 +349,9 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, idev = NULL; if (idev != NULL) - result = scic_sds_remote_device_frame_handler(idev, frame_index); + result = sci_remote_device_frame_handler(idev, frame_index); else - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); } } @@ -364,17 +362,16 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, } } -static void scic_sds_controller_event_completion(struct isci_host *ihost, - u32 completion_entry) +static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) { struct isci_remote_device *idev; struct isci_request *ireq; struct isci_phy *iphy; u32 index; - index = SCU_GET_COMPLETION_INDEX(completion_entry); + index = SCU_GET_COMPLETION_INDEX(ent); - switch (scu_get_event_type(completion_entry)) { + switch (scu_get_event_type(ent)) { case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: /* / @todo The driver did something wrong and we need to fix the condtion. 
*/ dev_err(&ihost->pdev->dev, @@ -382,7 +379,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "0x%x\n", __func__, ihost, - completion_entry); + ent); break; case SCU_EVENT_TYPE_SMU_PCQ_ERROR: @@ -396,21 +393,21 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "event 0x%x\n", __func__, ihost, - completion_entry); + ent); break; case SCU_EVENT_TYPE_TRANSPORT_ERROR: ireq = ihost->reqs[index]; - scic_sds_io_request_event_handler(ireq, completion_entry); + sci_io_request_event_handler(ireq, ent); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: - switch (scu_get_event_specifier(completion_entry)) { + switch (scu_get_event_specifier(ent)) { case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: ireq = ihost->reqs[index]; if (ireq != NULL) - scic_sds_io_request_event_handler(ireq, completion_entry); + sci_io_request_event_handler(ireq, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " @@ -418,14 +415,14 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "that doesnt exist.\n", __func__, ihost, - completion_entry); + ent); break; case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: idev = ihost->device_table[index]; if (idev != NULL) - scic_sds_remote_device_event_handler(idev, completion_entry); + sci_remote_device_event_handler(idev, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " @@ -433,7 +430,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "that doesnt exist.\n", __func__, ihost, - completion_entry); + ent); break; } @@ -448,9 +445,9 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, * direct error counter event to the phy object since that is where * we get the event notification. This is a type 4 event. 
*/ case SCU_EVENT_TYPE_OSSP_EVENT: - index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; - scic_sds_phy_event_handler(iphy, completion_entry); + sci_phy_event_handler(iphy, ent); break; case SCU_EVENT_TYPE_RNC_SUSPEND_TX: @@ -460,7 +457,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, idev = ihost->device_table[index]; if (idev != NULL) - scic_sds_remote_device_event_handler(idev, completion_entry); + sci_remote_device_event_handler(idev, ent); } else dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received event 0x%x " @@ -468,7 +465,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "exist.\n", __func__, ihost, - completion_entry, + ent, index); break; @@ -477,15 +474,15 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown event code %x\n", __func__, - completion_entry); + ent); break; } } -static void scic_sds_controller_process_completions(struct isci_host *ihost) +static void sci_controller_process_completions(struct isci_host *ihost) { u32 completion_count = 0; - u32 completion_entry; + u32 ent; u32 get_index; u32 get_cycle; u32 event_get; @@ -509,7 +506,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) ) { completion_count++; - completion_entry = ihost->completion_queue[get_index]; + ent = ihost->completion_queue[get_index]; /* increment the get pointer and check for rollover to toggle the cycle bit */ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << @@ -519,19 +516,19 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) dev_dbg(&ihost->pdev->dev, "%s: completion queue entry:0x%08x\n", __func__, - completion_entry); + ent); - switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { + switch (SCU_GET_COMPLETION_TYPE(ent)) { case SCU_COMPLETION_TYPE_TASK: - scic_sds_controller_task_completion(ihost, completion_entry); + sci_controller_task_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_SDMA: - scic_sds_controller_sdma_completion(ihost, completion_entry); + sci_controller_sdma_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_UFI: - scic_sds_controller_unsolicited_frame(ihost, completion_entry); + sci_controller_unsolicited_frame(ihost, ent); break; case SCU_COMPLETION_TYPE_EVENT: @@ -540,7 +537,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); event_get = (event_get+1) & (SCU_MAX_EVENTS-1); - scic_sds_controller_event_completion(ihost, completion_entry); + sci_controller_event_completion(ihost, ent); break; } default: @@ -548,7 +545,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) "%s: SCIC Controller received unknown " "completion type %x\n", __func__, - completion_entry); + ent); break; } } @@ -575,7 +572,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) } -static void scic_sds_controller_error_handler(struct isci_host *ihost) +static void sci_controller_error_handler(struct isci_host *ihost) { u32 interrupt_status; @@ -583,9 +580,9 @@ static void scic_sds_controller_error_handler(struct isci_host *ihost) readl(&ihost->smu_registers->interrupt_status); if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && - scic_sds_controller_completion_queue_has_entries(ihost)) { + 
sci_controller_completion_queue_has_entries(ihost)) { - scic_sds_controller_process_completions(ihost); + sci_controller_process_completions(ihost); writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); } else { dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, @@ -607,13 +604,13 @@ irqreturn_t isci_intx_isr(int vec, void *data) irqreturn_t ret = IRQ_NONE; struct isci_host *ihost = data; - if (scic_sds_controller_isr(ihost)) { + if (sci_controller_isr(ihost)) { writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); tasklet_schedule(&ihost->completion_tasklet); ret = IRQ_HANDLED; - } else if (scic_sds_controller_error_isr(ihost)) { + } else if (sci_controller_error_isr(ihost)) { spin_lock(&ihost->scic_lock); - scic_sds_controller_error_handler(ihost); + sci_controller_error_handler(ihost); spin_unlock(&ihost->scic_lock); ret = IRQ_HANDLED; } @@ -625,8 +622,8 @@ irqreturn_t isci_error_isr(int vec, void *data) { struct isci_host *ihost = data; - if (scic_sds_controller_error_isr(ihost)) - scic_sds_controller_error_handler(ihost); + if (sci_controller_error_isr(ihost)) + sci_controller_error_handler(ihost); return IRQ_HANDLED; } @@ -670,8 +667,8 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) } /** - * scic_controller_get_suggested_start_timeout() - This method returns the - * suggested scic_controller_start() timeout amount. The user is free to + * sci_controller_get_suggested_start_timeout() - This method returns the + * suggested sci_controller_start() timeout amount. The user is free to * use any timeout value, but this method provides the suggested minimum * start timeout value. The returned value is based upon empirical * information determined as a result of interoperability testing. @@ -681,7 +678,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) * This method returns the number of milliseconds for the suggested start * operation timeout. */ -static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) +static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) { /* Validate the user supplied parameters. 
*/ if (!ihost) @@ -706,19 +703,19 @@ static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); } -static void scic_controller_enable_interrupts(struct isci_host *ihost) +static void sci_controller_enable_interrupts(struct isci_host *ihost) { BUG_ON(ihost->smu_registers == NULL); writel(0, &ihost->smu_registers->interrupt_mask); } -void scic_controller_disable_interrupts(struct isci_host *ihost) +void sci_controller_disable_interrupts(struct isci_host *ihost) { BUG_ON(ihost->smu_registers == NULL); writel(0xffffffff, &ihost->smu_registers->interrupt_mask); } -static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *ihost) +static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) { u32 port_task_scheduler_value; @@ -731,7 +728,7 @@ static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *iho &ihost->scu_registers->peg0.ptsg.control); } -static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) +static void sci_controller_assign_task_entries(struct isci_host *ihost) { u32 task_assignment; @@ -752,7 +749,7 @@ static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) } -static void scic_sds_controller_initialize_completion_queue(struct isci_host *ihost) +static void sci_controller_initialize_completion_queue(struct isci_host *ihost) { u32 index; u32 completion_queue_control_value; @@ -799,7 +796,7 @@ static void scic_sds_controller_initialize_completion_queue(struct isci_host *ih } } -static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) +static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) { u32 frame_queue_control_value; u32 frame_queue_get_value; @@ -826,22 +823,8 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_h &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); } -/** - * This method will attempt to transition into the ready state for the - * controller and indicate that the controller start operation has completed - * if all criteria are met. - * @scic: This parameter indicates the controller object for which - * to transition to ready. - * @status: This parameter indicates the status value to be pass into the call - * to scic_cb_controller_start_complete(). - * - * none. - */ -static void scic_sds_controller_transition_to_ready( - struct isci_host *ihost, - enum sci_status status) +static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) { - if (ihost->sm.current_state_id == SCIC_STARTING) { /* * We move into the ready state, because some of the phys/ports @@ -855,7 +838,7 @@ static void scic_sds_controller_transition_to_ready( static bool is_phy_starting(struct isci_phy *iphy) { - enum scic_sds_phy_states state; + enum sci_phy_states state; state = iphy->sm.current_state_id; switch (state) { @@ -876,16 +859,16 @@ static bool is_phy_starting(struct isci_phy *iphy) } /** - * scic_sds_controller_start_next_phy - start phy + * sci_controller_start_next_phy - start phy * @scic: controller * * If all the phys have been started, then attempt to transition the * controller to the READY state and inform the user - * (scic_cb_controller_start_complete()). + * (sci_cb_controller_start_complete()). 
*/ -static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihost) +static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) { - struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; + struct sci_oem_params *oem = &ihost->oem_parameters; struct isci_phy *iphy; enum sci_status status; @@ -924,7 +907,7 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos * The controller has successfully finished the start process. * Inform the SCI Core user and transition to the READY state. */ if (is_controller_start_complete == true) { - scic_sds_controller_transition_to_ready(ihost, SCI_SUCCESS); + sci_controller_transition_to_ready(ihost, SCI_SUCCESS); sci_del_timer(&ihost->phy_timer); ihost->phy_startup_timer_pending = false; } @@ -944,11 +927,11 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos * incorrectly for the PORT or it was never * assigned to a PORT */ - return scic_sds_controller_start_next_phy(ihost); + return sci_controller_start_next_phy(ihost); } } - status = scic_sds_phy_start(iphy); + status = sci_phy_start(iphy); if (status == SCI_SUCCESS) { sci_mod_timer(&ihost->phy_timer, @@ -985,7 +968,7 @@ static void phy_startup_timeout(unsigned long data) ihost->phy_startup_timer_pending = false; do { - status = scic_sds_controller_start_next_phy(ihost); + status = sci_controller_start_next_phy(ihost); } while (status != SCI_SUCCESS); done: @@ -997,7 +980,7 @@ static u16 isci_tci_active(struct isci_host *ihost) return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); } -static enum sci_status scic_controller_start(struct isci_host *ihost, +static enum sci_status sci_controller_start(struct isci_host *ihost, u32 timeout) { enum sci_status result; @@ -1018,38 +1001,37 @@ static enum sci_status scic_controller_start(struct isci_host *ihost, isci_tci_free(ihost, index); /* Build the RNi free pool */ - scic_sds_remote_node_table_initialize( - &ihost->available_remote_nodes, - ihost->remote_node_entries); + sci_remote_node_table_initialize(&ihost->available_remote_nodes, + ihost->remote_node_entries); /* * Before anything else lets make sure we will not be * interrupted by the hardware. 
*/ - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); /* Enable the port task scheduler */ - scic_sds_controller_enable_port_task_scheduler(ihost); + sci_controller_enable_port_task_scheduler(ihost); /* Assign all the task entries to ihost physical function */ - scic_sds_controller_assign_task_entries(ihost); + sci_controller_assign_task_entries(ihost); /* Now initialize the completion queue */ - scic_sds_controller_initialize_completion_queue(ihost); + sci_controller_initialize_completion_queue(ihost); /* Initialize the unsolicited frame queue for use */ - scic_sds_controller_initialize_unsolicited_frame_queue(ihost); + sci_controller_initialize_unsolicited_frame_queue(ihost); /* Start all of the ports on this controller */ for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; - result = scic_sds_port_start(iport); + result = sci_port_start(iport); if (result) return result; } - scic_sds_controller_start_next_phy(ihost); + sci_controller_start_next_phy(ihost); sci_mod_timer(&ihost->timer, timeout); @@ -1061,29 +1043,29 @@ static enum sci_status scic_controller_start(struct isci_host *ihost, void isci_host_scan_start(struct Scsi_Host *shost) { struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; - unsigned long tmo = scic_controller_get_suggested_start_timeout(ihost); + unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); set_bit(IHOST_START_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); - scic_controller_start(ihost, tmo); - scic_controller_enable_interrupts(ihost); + sci_controller_start(ihost, tmo); + sci_controller_enable_interrupts(ihost); spin_unlock_irq(&ihost->scic_lock); } static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) { isci_host_change_state(ihost, isci_stopped); - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); clear_bit(IHOST_STOP_PENDING, &ihost->flags); wake_up(&ihost->eventq); } -static void scic_sds_controller_completion_handler(struct isci_host *ihost) +static void sci_controller_completion_handler(struct isci_host *ihost) { /* Empty out the completion queue */ - if (scic_sds_controller_completion_queue_has_entries(ihost)) - scic_sds_controller_process_completions(ihost); + if (sci_controller_completion_queue_has_entries(ihost)) + sci_controller_process_completions(ihost); /* Clear the interrupt and enable all interrupts again */ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); @@ -1116,7 +1098,7 @@ static void isci_host_completion_routine(unsigned long data) spin_lock_irq(&ihost->scic_lock); - scic_sds_controller_completion_handler(ihost); + sci_controller_completion_handler(ihost); /* Take the lists of completed I/Os from the host. */ @@ -1203,7 +1185,7 @@ static void isci_host_completion_routine(unsigned long data) } /** - * scic_controller_stop() - This method will stop an individual controller + * sci_controller_stop() - This method will stop an individual controller * object.This method will invoke the associated user callback upon * completion. The completion callback is called when the following * conditions are met: -# the method return status is SCI_SUCCESS. -# the @@ -1220,8 +1202,7 @@ static void isci_host_completion_routine(unsigned long data) * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the * controller is not either in the STARTED or STOPPED states. 
*/ -static enum sci_status scic_controller_stop(struct isci_host *ihost, - u32 timeout) +static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) { if (ihost->sm.current_state_id != SCIC_READY) { dev_warn(&ihost->pdev->dev, @@ -1236,7 +1217,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost, } /** - * scic_controller_reset() - This method will reset the supplied core + * sci_controller_reset() - This method will reset the supplied core * controller regardless of the state of said controller. This operation is * considered destructive. In other words, all current operations are wiped * out. No IO completions for outstanding devices occur. Outstanding IO @@ -1247,7 +1228,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost, * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if * the controller reset operation is unable to complete. */ -static enum sci_status scic_controller_reset(struct isci_host *ihost) +static enum sci_status sci_controller_reset(struct isci_host *ihost) { switch (ihost->sm.current_state_id) { case SCIC_RESET: @@ -1286,11 +1267,11 @@ void isci_host_deinit(struct isci_host *ihost) set_bit(IHOST_STOP_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); - scic_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); + sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); spin_unlock_irq(&ihost->scic_lock); wait_for_stop(ihost); - scic_controller_reset(ihost); + sci_controller_reset(ihost); /* Cancel any/all outstanding port timers */ for (i = 0; i < ihost->logical_port_entries; i++) { @@ -1329,11 +1310,8 @@ static void __iomem *smu_base(struct isci_host *isci_host) return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; } -static void isci_user_parameters_get( - struct isci_host *isci_host, - union scic_user_parameters *scic_user_params) +static void isci_user_parameters_get(struct sci_user_parameters *u) { - struct scic_sds_user_parameters *u = &scic_user_params->sds1; int i; for (i = 0; i < SCI_MAX_PHYS; i++) { @@ -1355,14 +1333,14 @@ static void isci_user_parameters_get( u->max_number_concurrent_device_spin_up = max_concurr_spinup; } -static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_change_state(&ihost->sm, SCIC_RESET); } -static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) +static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); @@ -1377,7 +1355,7 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 /** - * scic_controller_set_interrupt_coalescence() - This method allows the user to + * sci_controller_set_interrupt_coalescence() - This method allows the user to * configure the interrupt coalescence. * @controller: This parameter represents the handle to the controller object * for which its interrupt coalesce register is overridden. @@ -1394,9 +1372,9 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. 
*/ static enum sci_status -scic_controller_set_interrupt_coalescence(struct isci_host *ihost, - u32 coalesce_number, - u32 coalesce_timeout) +sci_controller_set_interrupt_coalescence(struct isci_host *ihost, + u32 coalesce_number, + u32 coalesce_timeout) { u8 timeout_encode = 0; u32 min = 0; @@ -1489,23 +1467,23 @@ scic_controller_set_interrupt_coalescence(struct isci_host *ihost, } -static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* set the default interrupt coalescence number and timeout value. */ - scic_controller_set_interrupt_coalescence(ihost, 0x10, 250); + sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); } -static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) +static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* disable interrupt coalescence. */ - scic_controller_set_interrupt_coalescence(ihost, 0, 0); + sci_controller_set_interrupt_coalescence(ihost, 0, 0); } -static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) +static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) { u32 index; enum sci_status status; @@ -1514,7 +1492,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) status = SCI_SUCCESS; for (index = 0; index < SCI_MAX_PHYS; index++) { - phy_status = scic_sds_phy_stop(&ihost->phys[index]); + phy_status = sci_phy_stop(&ihost->phys[index]); if (phy_status != SCI_SUCCESS && phy_status != SCI_FAILURE_INVALID_STATE) { @@ -1531,7 +1509,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) return status; } -static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) +static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) { u32 index; enum sci_status port_status; @@ -1540,7 +1518,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; - port_status = scic_sds_port_stop(iport); + port_status = sci_port_stop(iport); if ((port_status != SCI_SUCCESS) && (port_status != SCI_FAILURE_INVALID_STATE)) { @@ -1558,7 +1536,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) return status; } -static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) +static enum sci_status sci_controller_stop_devices(struct isci_host *ihost) { u32 index; enum sci_status status; @@ -1569,7 +1547,7 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) for (index = 0; index < ihost->remote_node_entries; index++) { if (ihost->device_table[index] != NULL) { /* / @todo What timeout value do we want to provide to this request? 
*/ - device_status = scic_remote_device_stop(ihost->device_table[index], 0); + device_status = sci_remote_device_stop(ihost->device_table[index], 0); if ((device_status != SCI_SUCCESS) && (device_status != SCI_FAILURE_INVALID_STATE)) { @@ -1586,33 +1564,27 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) return status; } -static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* Stop all of the components for this controller */ - scic_sds_controller_stop_phys(ihost); - scic_sds_controller_stop_ports(ihost); - scic_sds_controller_stop_devices(ihost); + sci_controller_stop_phys(ihost); + sci_controller_stop_ports(ihost); + sci_controller_stop_devices(ihost); } -static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) +static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_del_timer(&ihost->timer); } - -/** - * scic_sds_controller_reset_hardware() - - * - * This method will reset the controller hardware. - */ -static void scic_sds_controller_reset_hardware(struct isci_host *ihost) +static void sci_controller_reset_hardware(struct isci_host *ihost) { /* Disable interrupts so we dont take any spurious interrupts */ - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); /* Reset the SCU */ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); @@ -1627,82 +1599,82 @@ static void scic_sds_controller_reset_hardware(struct isci_host *ihost) writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); } -static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); - scic_sds_controller_reset_hardware(ihost); + sci_controller_reset_hardware(ihost); sci_change_state(&ihost->sm, SCIC_RESET); } -static const struct sci_base_state scic_sds_controller_state_table[] = { +static const struct sci_base_state sci_controller_state_table[] = { [SCIC_INITIAL] = { - .enter_state = scic_sds_controller_initial_state_enter, + .enter_state = sci_controller_initial_state_enter, }, [SCIC_RESET] = {}, [SCIC_INITIALIZING] = {}, [SCIC_INITIALIZED] = {}, [SCIC_STARTING] = { - .exit_state = scic_sds_controller_starting_state_exit, + .exit_state = sci_controller_starting_state_exit, }, [SCIC_READY] = { - .enter_state = scic_sds_controller_ready_state_enter, - .exit_state = scic_sds_controller_ready_state_exit, + .enter_state = sci_controller_ready_state_enter, + .exit_state = sci_controller_ready_state_exit, }, [SCIC_RESETTING] = { - .enter_state = scic_sds_controller_resetting_state_enter, + .enter_state = sci_controller_resetting_state_enter, }, [SCIC_STOPPING] = { - .enter_state = scic_sds_controller_stopping_state_enter, - .exit_state = scic_sds_controller_stopping_state_exit, + .enter_state = sci_controller_stopping_state_enter, + .exit_state = sci_controller_stopping_state_exit, }, [SCIC_STOPPED] = {}, [SCIC_FAILED] = {} }; -static void scic_sds_controller_set_default_config_parameters(struct isci_host *ihost) +static void sci_controller_set_default_config_parameters(struct isci_host *ihost) { /* these defaults are overridden by the platform / firmware */ 
u16 index; /* Default to APC mode. */ - ihost->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; + ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; /* Default to APC mode. */ - ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; + ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1; /* Default to no SSC operation. */ - ihost->oem_parameters.sds1.controller.do_enable_ssc = false; + ihost->oem_parameters.controller.do_enable_ssc = false; /* Initialize all of the port parameter information to narrow ports. */ for (index = 0; index < SCI_MAX_PORTS; index++) { - ihost->oem_parameters.sds1.ports[index].phy_mask = 0; + ihost->oem_parameters.ports[index].phy_mask = 0; } /* Initialize all of the phy parameter information. */ for (index = 0; index < SCI_MAX_PHYS; index++) { /* Default to 6G (i.e. Gen 3) for now. */ - ihost->user_parameters.sds1.phys[index].max_speed_generation = 3; + ihost->user_parameters.phys[index].max_speed_generation = 3; /* the frequencies cannot be 0 */ - ihost->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; - ihost->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; - ihost->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; + ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; + ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff; + ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; /* * Previous Vitesse based expanders had a arbitration issue that * is worked around by having the upper 32-bits of SAS address * with a value greater then the Vitesse company identifier. * Hence, usage of 0x5FCFFFFF. */ - ihost->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; - ihost->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; + ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id; + ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF; } - ihost->user_parameters.sds1.stp_inactivity_timeout = 5; - ihost->user_parameters.sds1.ssp_inactivity_timeout = 5; - ihost->user_parameters.sds1.stp_max_occupancy_timeout = 5; - ihost->user_parameters.sds1.ssp_max_occupancy_timeout = 20; - ihost->user_parameters.sds1.no_outbound_task_timeout = 20; + ihost->user_parameters.stp_inactivity_timeout = 5; + ihost->user_parameters.ssp_inactivity_timeout = 5; + ihost->user_parameters.stp_max_occupancy_timeout = 5; + ihost->user_parameters.ssp_max_occupancy_timeout = 20; + ihost->user_parameters.no_outbound_task_timeout = 20; } static void controller_timeout(unsigned long data) @@ -1718,7 +1690,7 @@ static void controller_timeout(unsigned long data) goto done; if (sm->current_state_id == SCIC_STARTING) - scic_sds_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); + sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); else if (sm->current_state_id == SCIC_STOPPING) { sci_change_state(sm, SCIC_FAILED); isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); @@ -1732,45 +1704,29 @@ done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } -/** - * scic_controller_construct() - This method will attempt to construct a - * controller object utilizing the supplied parameter information. - * @c: This parameter specifies the controller to be constructed. 
- * @scu_base: mapped base address of the scu registers - * @smu_base: mapped base address of the smu registers - * - * Indicate if the controller was successfully constructed or if it failed in - * some way. SCI_SUCCESS This value is returned if the controller was - * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned - * if the interrupt coalescence timer may cause SAS compliance issues for SMP - * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE - * This value is returned if the controller does not support the supplied type. - * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the - * controller does not support the supplied initialization data version. - */ -static enum sci_status scic_controller_construct(struct isci_host *ihost, - void __iomem *scu_base, - void __iomem *smu_base) +static enum sci_status sci_controller_construct(struct isci_host *ihost, + void __iomem *scu_base, + void __iomem *smu_base) { u8 i; - sci_init_sm(&ihost->sm, scic_sds_controller_state_table, SCIC_INITIAL); + sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL); ihost->scu_registers = scu_base; ihost->smu_registers = smu_base; - scic_sds_port_configuration_agent_construct(&ihost->port_agent); + sci_port_configuration_agent_construct(&ihost->port_agent); /* Construct the ports for this controller */ for (i = 0; i < SCI_MAX_PORTS; i++) - scic_sds_port_construct(&ihost->ports[i], i, ihost); - scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); + sci_port_construct(&ihost->ports[i], i, ihost); + sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); /* Construct the phys for this controller */ for (i = 0; i < SCI_MAX_PHYS; i++) { /* Add all the PHYs to the dummy port */ - scic_sds_phy_construct(&ihost->phys[i], - &ihost->ports[SCI_MAX_PORTS], i); + sci_phy_construct(&ihost->phys[i], + &ihost->ports[SCI_MAX_PORTS], i); } ihost->invalid_phy_mask = 0; @@ -1778,12 +1734,12 @@ static enum sci_status scic_controller_construct(struct isci_host *ihost, sci_init_timer(&ihost->timer, controller_timeout); /* Initialize the User and OEM parameters to default values. 
*/ - scic_sds_controller_set_default_config_parameters(ihost); + sci_controller_set_default_config_parameters(ihost); - return scic_controller_reset(ihost); + return sci_controller_reset(ihost); } -int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) +int sci_oem_parameters_validate(struct sci_oem_params *oem) { int i; @@ -1817,8 +1773,7 @@ int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) return 0; } -static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, - union scic_oem_parameters *scic_parms) +static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) { u32 state = ihost->sm.current_state_id; @@ -1826,9 +1781,8 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, state == SCIC_INITIALIZING || state == SCIC_INITIALIZED) { - if (scic_oem_parameters_validate(&scic_parms->sds1)) + if (sci_oem_parameters_validate(&ihost->oem_parameters)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; - ihost->oem_parameters.sds1 = scic_parms->sds1; return SCI_SUCCESS; } @@ -1836,13 +1790,6 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, return SCI_FAILURE_INVALID_STATE; } -void scic_oem_parameters_get( - struct isci_host *ihost, - union scic_oem_parameters *scic_parms) -{ - memcpy(scic_parms, (&ihost->oem_parameters), sizeof(*scic_parms)); -} - static void power_control_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; @@ -1873,13 +1820,13 @@ static void power_control_timeout(unsigned long data) continue; if (ihost->power_control.phys_granted_power >= - ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) + ihost->oem_parameters.controller.max_concurrent_dev_spin_up) break; ihost->power_control.requesters[i] = NULL; ihost->power_control.phys_waiting--; ihost->power_control.phys_granted_power++; - scic_sds_phy_consume_power_handler(iphy); + sci_phy_consume_power_handler(iphy); } /* @@ -1893,22 +1840,15 @@ done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } -/** - * This method inserts the phy in the stagger spinup control queue. - * @scic: - * - * - */ -void scic_sds_controller_power_control_queue_insert( - struct isci_host *ihost, - struct isci_phy *iphy) +void sci_controller_power_control_queue_insert(struct isci_host *ihost, + struct isci_phy *iphy) { BUG_ON(iphy == NULL); if (ihost->power_control.phys_granted_power < - ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { + ihost->oem_parameters.controller.max_concurrent_dev_spin_up) { ihost->power_control.phys_granted_power++; - scic_sds_phy_consume_power_handler(iphy); + sci_phy_consume_power_handler(iphy); /* * stop and start the power_control timer. When the timer fires, the @@ -1928,21 +1868,13 @@ void scic_sds_controller_power_control_queue_insert( } } -/** - * This method removes the phy from the stagger spinup control queue. - * @scic: - * - * - */ -void scic_sds_controller_power_control_queue_remove( - struct isci_host *ihost, - struct isci_phy *iphy) +void sci_controller_power_control_queue_remove(struct isci_host *ihost, + struct isci_phy *iphy) { BUG_ON(iphy == NULL); - if (ihost->power_control.requesters[iphy->phy_index] != NULL) { + if (ihost->power_control.requesters[iphy->phy_index]) ihost->power_control.phys_waiting--; - } ihost->power_control.requesters[iphy->phy_index] = NULL; } @@ -1952,9 +1884,9 @@ void scic_sds_controller_power_control_queue_remove( /* Initialize the AFE for this phy index. 
We need to read the AFE setup from * the OEM parameters */ -static void scic_sds_controller_afe_initialization(struct isci_host *ihost) +static void sci_controller_afe_initialization(struct isci_host *ihost) { - const struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; + const struct sci_oem_params *oem = &ihost->oem_parameters; u32 afe_status; u32 phy_id; @@ -2111,7 +2043,7 @@ static void scic_sds_controller_afe_initialization(struct isci_host *ihost) udelay(AFE_REGISTER_WRITE_DELAY); } -static void scic_sds_controller_initialize_power_control(struct isci_host *ihost) +static void sci_controller_initialize_power_control(struct isci_host *ihost) { sci_init_timer(&ihost->power_control.timer, power_control_timeout); @@ -2122,7 +2054,7 @@ static void scic_sds_controller_initialize_power_control(struct isci_host *ihost ihost->power_control.phys_granted_power = 0; } -static enum sci_status scic_controller_initialize(struct isci_host *ihost) +static enum sci_status sci_controller_initialize(struct isci_host *ihost) { struct sci_base_state_machine *sm = &ihost->sm; enum sci_status result = SCI_FAILURE; @@ -2142,14 +2074,14 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) ihost->next_phy_to_start = 0; ihost->phy_startup_timer_pending = false; - scic_sds_controller_initialize_power_control(ihost); + sci_controller_initialize_power_control(ihost); /* * There is nothing to do here for B0 since we do not have to * program the AFE registers. * / @todo The AFE settings are supposed to be correct for the B0 but * / presently they seem to be wrong. */ - scic_sds_controller_afe_initialization(ihost); + sci_controller_afe_initialization(ihost); /* Take the hardware out of reset */ @@ -2206,24 +2138,22 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) * are accessed during the port initialization. 
*/ for (i = 0; i < SCI_MAX_PHYS; i++) { - result = scic_sds_phy_initialize(&ihost->phys[i], - &ihost->scu_registers->peg0.pe[i].tl, - &ihost->scu_registers->peg0.pe[i].ll); + result = sci_phy_initialize(&ihost->phys[i], + &ihost->scu_registers->peg0.pe[i].tl, + &ihost->scu_registers->peg0.pe[i].ll); if (result != SCI_SUCCESS) goto out; } for (i = 0; i < ihost->logical_port_entries; i++) { - result = scic_sds_port_initialize(&ihost->ports[i], - &ihost->scu_registers->peg0.ptsg.port[i], - &ihost->scu_registers->peg0.ptsg.protocol_engine, - &ihost->scu_registers->peg0.viit[i]); + struct isci_port *iport = &ihost->ports[i]; - if (result != SCI_SUCCESS) - goto out; + iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; + iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; + iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; } - result = scic_sds_port_configuration_agent_initialize(ihost, &ihost->port_agent); + result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); out: /* Advance the controller state machine */ @@ -2236,9 +2166,8 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) return result; } -static enum sci_status scic_user_parameters_set( - struct isci_host *ihost, - union scic_user_parameters *scic_parms) +static enum sci_status sci_user_parameters_set(struct isci_host *ihost, + struct sci_user_parameters *sci_parms) { u32 state = ihost->sm.current_state_id; @@ -2254,7 +2183,7 @@ static enum sci_status scic_user_parameters_set( for (index = 0; index < SCI_MAX_PHYS; index++) { struct sci_phy_user_params *user_phy; - user_phy = &scic_parms->sds1.phys[index]; + user_phy = &sci_parms->phys[index]; if (!((user_phy->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && @@ -2275,14 +2204,14 @@ static enum sci_status scic_user_parameters_set( return SCI_FAILURE_INVALID_PARAMETER_VALUE; } - if ((scic_parms->sds1.stp_inactivity_timeout == 0) || - (scic_parms->sds1.ssp_inactivity_timeout == 0) || - (scic_parms->sds1.stp_max_occupancy_timeout == 0) || - (scic_parms->sds1.ssp_max_occupancy_timeout == 0) || - (scic_parms->sds1.no_outbound_task_timeout == 0)) + if ((sci_parms->stp_inactivity_timeout == 0) || + (sci_parms->ssp_inactivity_timeout == 0) || + (sci_parms->stp_max_occupancy_timeout == 0) || + (sci_parms->ssp_max_occupancy_timeout == 0) || + (sci_parms->no_outbound_task_timeout == 0)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; - memcpy(&ihost->user_parameters, scic_parms, sizeof(*scic_parms)); + memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); return SCI_SUCCESS; } @@ -2290,7 +2219,7 @@ static enum sci_status scic_user_parameters_set( return SCI_FAILURE_INVALID_STATE; } -static int scic_controller_mem_init(struct isci_host *ihost) +static int sci_controller_mem_init(struct isci_host *ihost) { struct device *dev = &ihost->pdev->dev; dma_addr_t dma; @@ -2307,7 +2236,7 @@ static int scic_controller_mem_init(struct isci_host *ihost) size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, - GFP_KERNEL); + GFP_KERNEL); if (!ihost->remote_node_context_table) return -ENOMEM; @@ -2323,7 +2252,7 @@ static int scic_controller_mem_init(struct isci_host *ihost) writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); - err = scic_sds_unsolicited_frame_control_construct(ihost); + err = 
sci_unsolicited_frame_control_construct(ihost); if (err) return err; @@ -2348,8 +2277,7 @@ int isci_host_init(struct isci_host *ihost) { int err = 0, i; enum sci_status status; - union scic_oem_parameters oem; - union scic_user_parameters scic_user_params; + struct sci_user_parameters sci_user_params; struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); spin_lock_init(&ihost->state_lock); @@ -2358,12 +2286,12 @@ int isci_host_init(struct isci_host *ihost) isci_host_change_state(ihost, isci_starting); - status = scic_controller_construct(ihost, scu_base(ihost), - smu_base(ihost)); + status = sci_controller_construct(ihost, scu_base(ihost), + smu_base(ihost)); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, - "%s: scic_controller_construct failed - status = %x\n", + "%s: sci_controller_construct failed - status = %x\n", __func__, status); return -ENODEV; @@ -2376,21 +2304,18 @@ int isci_host_init(struct isci_host *ihost) * grab initial values stored in the controller object for OEM and USER * parameters */ - isci_user_parameters_get(ihost, &scic_user_params); - status = scic_user_parameters_set(ihost, - &scic_user_params); + isci_user_parameters_get(&sci_user_params); + status = sci_user_parameters_set(ihost, &sci_user_params); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_user_parameters_set failed\n", + "%s: sci_user_parameters_set failed\n", __func__); return -ENODEV; } - scic_oem_parameters_get(ihost, &oem); - /* grab any OEM parameters specified in orom */ if (pci_info->orom) { - status = isci_parse_oem_parameters(&oem, + status = isci_parse_oem_parameters(&ihost->oem_parameters, pci_info->orom, ihost->id); if (status != SCI_SUCCESS) { @@ -2400,10 +2325,10 @@ int isci_host_init(struct isci_host *ihost) } } - status = scic_oem_parameters_set(ihost, &oem); + status = sci_oem_parameters_set(ihost); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_oem_parameters_set failed\n", + "%s: sci_oem_parameters_set failed\n", __func__); return -ENODEV; } @@ -2415,17 +2340,17 @@ int isci_host_init(struct isci_host *ihost) INIT_LIST_HEAD(&ihost->requests_to_errorback); spin_lock_irq(&ihost->scic_lock); - status = scic_controller_initialize(ihost); + status = sci_controller_initialize(ihost); spin_unlock_irq(&ihost->scic_lock); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_controller_initialize failed -" + "%s: sci_controller_initialize failed -" " status = 0x%x\n", __func__, status); return -ENODEV; } - err = scic_controller_mem_init(ihost); + err = sci_controller_mem_init(ihost); if (err) return err; @@ -2463,20 +2388,20 @@ int isci_host_init(struct isci_host *ihost) return 0; } -void scic_sds_controller_link_up(struct isci_host *ihost, - struct isci_port *iport, struct isci_phy *iphy) +void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) { switch (ihost->sm.current_state_id) { case SCIC_STARTING: sci_del_timer(&ihost->phy_timer); ihost->phy_startup_timer_pending = false; ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, - iport, iphy); - scic_sds_controller_start_next_phy(ihost); + iport, iphy); + sci_controller_start_next_phy(ihost); break; case SCIC_READY: ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, - iport, iphy); + iport, iphy); break; default: dev_dbg(&ihost->pdev->dev, @@ -2486,8 +2411,8 @@ void scic_sds_controller_link_up(struct isci_host *ihost, } } -void scic_sds_controller_link_down(struct isci_host *ihost, - struct isci_port *iport, 
struct isci_phy *iphy) +void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) { switch (ihost->sm.current_state_id) { case SCIC_STARTING: @@ -2505,12 +2430,7 @@ void scic_sds_controller_link_down(struct isci_host *ihost, } } -/** - * This is a helper method to determine if any remote devices on this - * controller are still in the stopping state. - * - */ -static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ihost) +static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) { u32 index; @@ -2523,12 +2443,8 @@ static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ih return false; } -/** - * This method is called by the remote device to inform the controller - * object that the remote device has stopped. - */ -void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, - struct isci_remote_device *idev) +void sci_controller_remote_device_stopped(struct isci_host *ihost, + struct isci_remote_device *idev) { if (ihost->sm.current_state_id != SCIC_STOPPING) { dev_dbg(&ihost->pdev->dev, @@ -2539,32 +2455,19 @@ void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, return; } - if (!scic_sds_controller_has_remote_devices_stopping(ihost)) { + if (!sci_controller_has_remote_devices_stopping(ihost)) sci_change_state(&ihost->sm, SCIC_STOPPED); - } } -/** - * This method will write to the SCU PCP register the request value. The method - * is used to suspend/resume ports, devices, and phys. - * @scic: - * - * - */ -void scic_sds_controller_post_request( - struct isci_host *ihost, - u32 request) +void sci_controller_post_request(struct isci_host *ihost, u32 request) { - dev_dbg(&ihost->pdev->dev, - "%s: SCIC Controller 0x%p post request 0x%08x\n", - __func__, - ihost, - request); + dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n", + __func__, ihost->id, request); writel(request, &ihost->smu_registers->post_context_port); } -struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) +struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag) { u16 task_index; u16 task_sequence; @@ -2599,15 +2502,14 @@ struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote * node index available. */ -enum sci_status scic_sds_controller_allocate_remote_node_context( - struct isci_host *ihost, - struct isci_remote_device *idev, - u16 *node_id) +enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 *node_id) { u16 node_index; - u32 remote_node_count = scic_sds_remote_device_node_count(idev); + u32 remote_node_count = sci_remote_device_node_count(idev); - node_index = scic_sds_remote_node_table_allocate_remote_node( + node_index = sci_remote_node_table_allocate_remote_node( &ihost->available_remote_nodes, remote_node_count ); @@ -2622,68 +2524,26 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( return SCI_FAILURE_INSUFFICIENT_RESOURCES; } -/** - * This method frees the remote node index back to the available pool. Once - * this is done the remote node context buffer is no longer valid and can - * not be used. 
- * @scic: - * @sci_dev: - * @node_id: - * - */ -void scic_sds_controller_free_remote_node_context( - struct isci_host *ihost, - struct isci_remote_device *idev, - u16 node_id) +void sci_controller_free_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 node_id) { - u32 remote_node_count = scic_sds_remote_device_node_count(idev); + u32 remote_node_count = sci_remote_device_node_count(idev); if (ihost->device_table[node_id] == idev) { ihost->device_table[node_id] = NULL; - scic_sds_remote_node_table_release_remote_node_index( + sci_remote_node_table_release_remote_node_index( &ihost->available_remote_nodes, remote_node_count, node_id ); } } -/** - * This method returns the union scu_remote_node_context for the specified remote - * node id. - * @scic: - * @node_id: - * - * union scu_remote_node_context* - */ -union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( - struct isci_host *ihost, - u16 node_id - ) { - if ( - (node_id < ihost->remote_node_entries) - && (ihost->device_table[node_id] != NULL) - ) { - return &ihost->remote_node_context_table[node_id]; - } - - return NULL; -} - -/** - * - * @resposne_buffer: This is the buffer into which the D2H register FIS will be - * constructed. - * @frame_header: This is the frame header returned by the hardware. - * @frame_buffer: This is the frame buffer returned by the hardware. - * - * This method will combind the frame header and frame buffer to create a SATA - * D2H register FIS none - */ -void scic_sds_controller_copy_sata_response( - void *response_buffer, - void *frame_header, - void *frame_buffer) +void sci_controller_copy_sata_response(void *response_buffer, + void *frame_header, + void *frame_buffer) { + /* XXX type safety? */ memcpy(response_buffer, frame_header, sizeof(u32)); memcpy(response_buffer + sizeof(u32), @@ -2691,21 +2551,9 @@ void scic_sds_controller_copy_sata_response( sizeof(struct dev_to_host_fis) - sizeof(u32)); } -/** - * This method releases the frame once this is done the frame is available for - * re-use by the hardware. The data contained in the frame header and frame - * buffer is no longer valid. The UF queue get pointer is only updated if UF - * control indicates this is appropriate. - * @scic: - * @frame_index: - * - */ -void scic_sds_controller_release_frame( - struct isci_host *ihost, - u32 frame_index) +void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) { - if (scic_sds_unsolicited_frame_control_release_frame( - &ihost->uf_control, frame_index) == true) + if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) writel(ihost->uf_control.get, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); } @@ -2763,21 +2611,9 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) return SCI_FAILURE_INVALID_IO_TAG; } -/** - * scic_controller_start_io() - This method is called by the SCI user to - * send/start an IO request. If the method invocation is successful, then - * the IO request has been queued to the hardware for processing. - * @controller: the handle to the controller object for which to start an IO - * request. - * @remote_device: the handle to the remote device object for which to start an - * IO request. - * @io_request: the handle to the io request object to start. - * @io_tag: This parameter specifies a previously allocated IO tag that the - * user desires to be utilized for this request. 
- */ -enum sci_status scic_controller_start_io(struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_start_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; @@ -2786,36 +2622,23 @@ enum sci_status scic_controller_start_io(struct isci_host *ihost, return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_io(ihost, idev, ireq); + status = sci_remote_device_start_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); + sci_controller_post_request(ihost, sci_request_get_post_context(ireq)); return SCI_SUCCESS; } -/** - * scic_controller_terminate_request() - This method is called by the SCI Core - * user to terminate an ongoing (i.e. started) core IO request. This does - * not abort the IO request at the target, but rather removes the IO request - * from the host controller. - * @controller: the handle to the controller object for which to terminate a - * request. - * @remote_device: the handle to the remote device object for which to - * terminate a request. - * @request: the handle to the io or task management request object to - * terminate. - * - * Indicate if the controller successfully began the terminate process for the - * IO request. SCI_SUCCESS if the terminate process was successfully started - * for the request. Determine the failure situations and return values. - */ -enum sci_status scic_controller_terminate_request( - struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_terminate_request(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { + /* terminate an ongoing (i.e. started) core IO request. This does not + * abort the IO request at the target, but rather removes the IO + * request from the host controller. + */ enum sci_status status; if (ihost->sm.current_state_id != SCIC_READY) { @@ -2824,7 +2647,7 @@ enum sci_status scic_controller_terminate_request( return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_io_request_terminate(ireq); + status = sci_io_request_terminate(ireq); if (status != SCI_SUCCESS) return status; @@ -2832,27 +2655,25 @@ enum sci_status scic_controller_terminate_request( * Utilize the original post context command and or in the POST_TC_ABORT * request sub-type. */ - scic_sds_controller_post_request(ihost, - scic_sds_request_get_post_context(ireq) | - SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); + sci_controller_post_request(ihost, + ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); return SCI_SUCCESS; } /** - * scic_controller_complete_io() - This method will perform core specific + * sci_controller_complete_io() - This method will perform core specific * completion operations for an IO request. After this method is invoked, * the user should consider the IO request as invalid until it is properly * reused (i.e. re-constructed). - * @controller: The handle to the controller object for which to complete the + * @ihost: The handle to the controller object for which to complete the * IO request. - * @remote_device: The handle to the remote device object for which to complete + * @idev: The handle to the remote device object for which to complete * the IO request. - * @io_request: the handle to the io request object to complete. 
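
Taken together, the start, terminate, and complete entry points renamed in these hunks make up the core I/O lifecycle. A caller-side sketch, with locking, completion waiting, and error handling omitted and the wrapper name invented for illustration:

static void example_io_lifecycle(struct isci_host *ihost,
                                 struct isci_remote_device *idev,
                                 struct isci_request *ireq)
{
        if (sci_controller_start_io(ihost, idev, ireq) != SCI_SUCCESS)
                return;

        /* optionally pull the request back from the controller; the
         * command is not aborted at the target itself
         */
        sci_controller_terminate_request(ihost, idev, ireq);

        /* ... wait for the completion to be reported ... */

        /* the request is invalid until re-constructed after this call */
        sci_controller_complete_io(ihost, idev, ireq);
}
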
+ * @ireq: the handle to the io request object to complete. */ -enum sci_status scic_controller_complete_io( - struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_complete_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; u16 index; @@ -2862,7 +2683,7 @@ enum sci_status scic_controller_complete_io( /* XXX: Implement this function */ return SCI_FAILURE; case SCIC_READY: - status = scic_sds_remote_device_complete_io(ihost, idev, ireq); + status = sci_remote_device_complete_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; @@ -2876,7 +2697,7 @@ enum sci_status scic_controller_complete_io( } -enum sci_status scic_controller_continue_io(struct isci_request *ireq) +enum sci_status sci_controller_continue_io(struct isci_request *ireq) { struct isci_host *ihost = ireq->owning_controller; @@ -2886,12 +2707,12 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) } set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); + sci_controller_post_request(ihost, sci_request_get_post_context(ireq)); return SCI_SUCCESS; } /** - * scic_controller_start_task() - This method is called by the SCIC user to + * sci_controller_start_task() - This method is called by the SCIC user to * send/start a framework task management request. * @controller: the handle to the controller object for which to start the task * management request. @@ -2899,10 +2720,9 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) * the task management request. * @task_request: the handle to the task request object to start. */ -enum sci_task_status scic_controller_start_task( - struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_task_status sci_controller_start_task(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; @@ -2914,7 +2734,7 @@ enum sci_task_status scic_controller_start_task( return SCI_TASK_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_task(ihost, idev, ireq); + status = sci_remote_device_start_task(ihost, idev, ireq); switch (status) { case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); @@ -2928,8 +2748,8 @@ enum sci_task_status scic_controller_start_task( case SCI_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(ihost, - scic_sds_request_get_post_context(ireq)); + sci_controller_post_request(ihost, + sci_request_get_post_context(ireq)); break; default: break; diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 013f672a8fd7..d87f21de1807 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -69,12 +69,12 @@ struct scu_task_context; /** - * struct scic_power_control - + * struct sci_power_control - * * This structure defines the fields for managing power control for direct * attached disk devices. */ -struct scic_power_control { +struct sci_power_control { /** * This field is set when the power control timer is running and cleared when * it is not. @@ -99,18 +99,18 @@ struct scic_power_control { /** * This field is an array of phys that we are waiting on. 
The phys are direct - * mapped into requesters via struct scic_sds_phy.phy_index + * mapped into requesters via struct sci_phy.phy_index */ struct isci_phy *requesters[SCI_MAX_PHYS]; }; -struct scic_sds_port_configuration_agent; +struct sci_port_configuration_agent; typedef void (*port_config_fn)(struct isci_host *, - struct scic_sds_port_configuration_agent *, + struct sci_port_configuration_agent *, struct isci_port *, struct isci_phy *); -struct scic_sds_port_configuration_agent { +struct sci_port_configuration_agent { u16 phy_configured_mask; u16 phy_ready_mask; struct { @@ -149,13 +149,13 @@ struct isci_host { /* XXX can we time this externally */ struct sci_timer timer; /* XXX drop reference module params directly */ - union scic_user_parameters user_parameters; + struct sci_user_parameters user_parameters; /* XXX no need to be a union */ - union scic_oem_parameters oem_parameters; - struct scic_sds_port_configuration_agent port_agent; + struct sci_oem_params oem_parameters; + struct sci_port_configuration_agent port_agent; struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; - struct scic_remote_node_table available_remote_nodes; - struct scic_power_control power_control; + struct sci_remote_node_table available_remote_nodes; + struct sci_power_control power_control; u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; struct scu_task_context *task_context_table; dma_addr_t task_context_dma; @@ -165,7 +165,7 @@ struct isci_host { u32 logical_port_entries; u32 remote_node_entries; u32 task_context_entries; - struct scic_sds_unsolicited_frame_control uf_control; + struct sci_unsolicited_frame_control uf_control; /* phy startup */ struct sci_timer phy_timer; @@ -206,10 +206,10 @@ struct isci_host { }; /** - * enum scic_sds_controller_states - This enumeration depicts all the states + * enum sci_controller_states - This enumeration depicts all the states * for the common controller state machine. */ -enum scic_sds_controller_states { +enum sci_controller_states { /** * Simply the initial state for the base controller state machine. */ @@ -360,14 +360,14 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) } /** - * scic_sds_controller_get_protocol_engine_group() - + * sci_controller_get_protocol_engine_group() - * * This macro returns the protocol engine group for this controller object. * Presently we only support protocol engine group 0 so just return that */ -#define scic_sds_controller_get_protocol_engine_group(controller) 0 +#define sci_controller_get_protocol_engine_group(controller) 0 -/* see scic_controller_io_tag_allocate|free for how seq and tci are built */ +/* see sci_controller_io_tag_allocate|free for how seq and tci are built */ #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci) /* these are returned by the hardware, so sanitize them */ @@ -375,7 +375,7 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) /* expander attached sata devices require 3 rnc slots */ -static inline int scic_sds_remote_device_node_count(struct isci_remote_device *idev) +static inline int sci_remote_device_node_count(struct isci_remote_device *idev) { struct domain_device *dev = idev->domain_dev; @@ -386,23 +386,23 @@ static inline int scic_sds_remote_device_node_count(struct isci_remote_device *i } /** - * scic_sds_controller_set_invalid_phy() - + * sci_controller_set_invalid_phy() - * * This macro will set the bit in the invalid phy mask for this controller * object. 
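
A worked example of the ISCI_TAG()/ISCI_TAG_TCI() packing above. The 4-bit sequence width is inferred from the << 12 shift into a u16, and SCI_MAX_IO_REQUESTS is assumed to be a power of two so the mask cleanly recovers the task context index:

static u16 example_tag_roundtrip(void)
{
        u8 seq = 3;                     /* per-slot sequence number */
        u16 tci = 5;                    /* task context index, e.g. from isci_alloc_tag() */
        u16 tag = ISCI_TAG(seq, tci);   /* 0x3005: seq lands in the top bits */

        return ISCI_TAG_TCI(tag);       /* masks the hardware-returned tag back to 5 */
}
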
This is used to control messages reported for invalid link up * notifications. */ -#define scic_sds_controller_set_invalid_phy(controller, phy) \ +#define sci_controller_set_invalid_phy(controller, phy) \ ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index)) /** - * scic_sds_controller_clear_invalid_phy() - + * sci_controller_clear_invalid_phy() - * * This macro will clear the bit in the invalid phy mask for this controller * object. This is used to control messages reported for invalid link up * notifications. */ -#define scic_sds_controller_clear_invalid_phy(controller, phy) \ +#define sci_controller_clear_invalid_phy(controller, phy) \ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) static inline struct device *sciphy_to_dev(struct isci_phy *iphy) @@ -460,56 +460,53 @@ static inline bool is_c0(void) return isci_si_rev > ISCI_SI_REVB0; } -void scic_sds_controller_post_request(struct isci_host *ihost, +void sci_controller_post_request(struct isci_host *ihost, u32 request); -void scic_sds_controller_release_frame(struct isci_host *ihost, +void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index); -void scic_sds_controller_copy_sata_response(void *response_buffer, +void sci_controller_copy_sata_response(void *response_buffer, void *frame_header, void *frame_buffer); -enum sci_status scic_sds_controller_allocate_remote_node_context(struct isci_host *ihost, +enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, struct isci_remote_device *idev, u16 *node_id); -void scic_sds_controller_free_remote_node_context( +void sci_controller_free_remote_node_context( struct isci_host *ihost, struct isci_remote_device *idev, u16 node_id); -union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( - struct isci_host *ihost, - u16 node_id); -struct isci_request *scic_request_by_tag(struct isci_host *ihost, +struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag); -void scic_sds_controller_power_control_queue_insert( +void sci_controller_power_control_queue_insert( struct isci_host *ihost, struct isci_phy *iphy); -void scic_sds_controller_power_control_queue_remove( +void sci_controller_power_control_queue_remove( struct isci_host *ihost, struct isci_phy *iphy); -void scic_sds_controller_link_up( +void sci_controller_link_up( struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy); -void scic_sds_controller_link_down( +void sci_controller_link_down( struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy); -void scic_sds_controller_remote_device_stopped( +void sci_controller_remote_device_stopped( struct isci_host *ihost, struct isci_remote_device *idev); -void scic_sds_controller_copy_task_context( +void sci_controller_copy_task_context( struct isci_host *ihost, struct isci_request *ireq); -void scic_sds_controller_register_setup(struct isci_host *ihost); +void sci_controller_register_setup(struct isci_host *ihost); -enum sci_status scic_controller_continue_io(struct isci_request *ireq); +enum sci_status sci_controller_continue_io(struct isci_request *ireq); int isci_host_scan_finished(struct Scsi_Host *, unsigned long); void isci_host_scan_start(struct Scsi_Host *); u16 isci_alloc_tag(struct isci_host *ihost); @@ -536,33 +533,33 @@ void isci_host_remote_device_start_complete( struct isci_remote_device *, enum sci_status); -void scic_controller_disable_interrupts( +void sci_controller_disable_interrupts( struct isci_host *ihost); -enum sci_status 
scic_controller_start_io( +enum sci_status sci_controller_start_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_task_status scic_controller_start_task( +enum sci_task_status sci_controller_start_task( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_controller_terminate_request( +enum sci_status sci_controller_terminate_request( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_controller_complete_io( +enum sci_status sci_controller_complete_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -void scic_sds_port_configuration_agent_construct( - struct scic_sds_port_configuration_agent *port_agent); +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent); -enum sci_status scic_sds_port_configuration_agent_initialize( +enum sci_status sci_port_configuration_agent_initialize( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent); + struct sci_port_configuration_agent *port_agent); #endif diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 68ca1a4f30af..8d9a8bfff4d5 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -484,7 +484,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic orom = isci_request_oprom(pdev); for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { - if (scic_oem_parameters_validate(&orom->ctrl[i])) { + if (sci_oem_parameters_validate(&orom->ctrl[i])) { dev_warn(&pdev->dev, "[%d]: invalid oem parameters detected, falling back to firmware\n", i); devm_kfree(&pdev->dev, orom); @@ -554,7 +554,7 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev) for_each_isci_host(i, ihost, pdev) { isci_unregister(ihost); isci_host_deinit(ihost); - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); } } diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 207328369edd..3afccfcb94e1 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -304,7 +304,7 @@ enum sci_status { * This member indicates that the operation failed, the failure is * controller implementation specific, and the response data associated * with the request is not valid. You can query for the controller - * specific error information via scic_controller_get_request_status() + * specific error information via sci_controller_get_request_status() */ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, @@ -395,7 +395,7 @@ enum sci_status { /** * This value indicates that an unsupported PCI device ID has been * specified. This indicates that attempts to invoke - * scic_library_allocate_controller() will fail. + * sci_library_allocate_controller() will fail. */ SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID @@ -493,7 +493,7 @@ irqreturn_t isci_error_isr(int vec, void *data); /* * Each timer is associated with a cancellation flag that is set when * del_timer() is called and checked in the timer callback function. This - * is needed since del_timer_sync() cannot be called with scic_lock held. + * is needed since del_timer_sync() cannot be called with sci_lock held. * For deinit however, del_timer_sync() is used without holding the lock. 
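
The comment above describes the timer cancellation convention; a sketch of a callback that follows it, where the cancel field name, the callback signature, and the embedding in struct isci_host are assumptions made for illustration:

static void example_timer_fn(unsigned long data)
{
        struct sci_timer *tmr = (struct sci_timer *)data;
        struct isci_host *ihost = container_of(tmr, struct isci_host, timer);
        unsigned long flags;

        spin_lock_irqsave(&ihost->scic_lock, flags);
        if (!tmr->cancel) {             /* set by the del_timer() caller */
                /* ... handle the timeout ... */
        }
        spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
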
*/ struct sci_timer { diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index ca96b5ad0d52..0df9f713f487 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -67,25 +67,13 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) return iphy->max_negotiated_speed; } -/* - * ***************************************************************************** - * * SCIC SDS PHY Internal Methods - * ***************************************************************************** */ - -/** - * This method will initialize the phy transport layer registers - * @sci_phy: - * @transport_layer_registers - * - * enum sci_status - */ -static enum sci_status scic_sds_phy_transport_layer_initialization( - struct isci_phy *iphy, - struct scu_transport_layer_registers __iomem *transport_layer_registers) +static enum sci_status +sci_phy_transport_layer_initialization(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *reg) { u32 tl_control; - iphy->transport_layer_registers = transport_layer_registers; + iphy->transport_layer_registers = reg; writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, &iphy->transport_layer_registers->stp_rni); @@ -101,32 +89,23 @@ static enum sci_status scic_sds_phy_transport_layer_initialization( return SCI_SUCCESS; } -/** - * This method will initialize the phy link layer registers - * @sci_phy: - * @link_layer_registers: - * - * enum sci_status - */ static enum sci_status -scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, - struct scu_link_layer_registers __iomem *link_layer_registers) +sci_phy_link_layer_initialization(struct isci_phy *iphy, + struct scu_link_layer_registers __iomem *reg) { - struct isci_host *ihost = - iphy->owning_port->owning_controller; + struct isci_host *ihost = iphy->owning_port->owning_controller; int phy_idx = iphy->phy_index; - struct sci_phy_user_params *phy_user = - &ihost->user_parameters.sds1.phys[phy_idx]; + struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx]; struct sci_phy_oem_params *phy_oem = - &ihost->oem_parameters.sds1.phys[phy_idx]; + &ihost->oem_parameters.phys[phy_idx]; u32 phy_configuration; - struct scic_phy_cap phy_cap; + struct sci_phy_cap phy_cap; u32 parity_check = 0; u32 parity_count = 0; u32 llctl, link_rate; u32 clksm_value = 0; - iphy->link_layer_registers = link_layer_registers; + iphy->link_layer_registers = reg; /* Set our IDENTIFY frame data */ #define SCI_END_DEVICE 0x01 @@ -169,7 +148,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, phy_cap.gen3_no_ssc = 1; phy_cap.gen2_no_ssc = 1; phy_cap.gen1_no_ssc = 1; - if (ihost->oem_parameters.sds1.controller.do_enable_ssc == true) { + if (ihost->oem_parameters.controller.do_enable_ssc == true) { phy_cap.gen3_ssc = 1; phy_cap.gen2_ssc = 1; phy_cap.gen1_ssc = 1; @@ -216,7 +195,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, &iphy->link_layer_registers->afe_lookup_table_control); llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, - (u8)ihost->user_parameters.sds1.no_outbound_task_timeout); + (u8)ihost->user_parameters.no_outbound_task_timeout); switch(phy_user->max_speed_generation) { case SCIC_SDS_PARM_GEN3_SPEED: @@ -289,7 +268,7 @@ done: struct isci_port *phy_get_non_dummy_port( struct isci_phy *iphy) { - if (scic_sds_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT) + if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT) return NULL; return iphy->owning_port; @@ -302,7 +281,7 @@ struct isci_port *phy_get_non_dummy_port( * * */ -void 
scic_sds_phy_set_port( +void sci_phy_set_port( struct isci_phy *iphy, struct isci_port *iport) { @@ -310,33 +289,23 @@ void scic_sds_phy_set_port( if (iphy->bcn_received_while_port_unassigned) { iphy->bcn_received_while_port_unassigned = false; - scic_sds_port_broadcast_change_received(iphy->owning_port, iphy); + sci_port_broadcast_change_received(iphy->owning_port, iphy); } } -/** - * This method will initialize the constructed phy - * @sci_phy: - * @link_layer_registers: - * - * enum sci_status - */ -enum sci_status scic_sds_phy_initialize( - struct isci_phy *iphy, - struct scu_transport_layer_registers __iomem *transport_layer_registers, - struct scu_link_layer_registers __iomem *link_layer_registers) +enum sci_status sci_phy_initialize(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *tl, + struct scu_link_layer_registers __iomem *ll) { /* Perfrom the initialization of the TL hardware */ - scic_sds_phy_transport_layer_initialization( - iphy, - transport_layer_registers); + sci_phy_transport_layer_initialization(iphy, tl); /* Perofrm the initialization of the PE hardware */ - scic_sds_phy_link_layer_initialization(iphy, link_layer_registers); + sci_phy_link_layer_initialization(iphy, ll); - /* - * There is nothing that needs to be done in this state just - * transition to the stopped state. */ + /* There is nothing that needs to be done in this state just + * transition to the stopped state + */ sci_change_state(&iphy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; @@ -351,9 +320,7 @@ enum sci_status scic_sds_phy_initialize( * This will either be the RNi for the device or an invalid RNi if there * is no current device assigned to the phy. */ -void scic_sds_phy_setup_transport( - struct isci_phy *iphy, - u32 device_id) +void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id) { u32 tl_control; @@ -368,15 +335,7 @@ void scic_sds_phy_setup_transport( writel(tl_control, &iphy->transport_layer_registers->control); } -/** - * - * @sci_phy: The phy object to be suspended. - * - * This function will perform the register reads/writes to suspend the SCU - * hardware protocol engine. 
none - */ -static void scic_sds_phy_suspend( - struct isci_phy *iphy) +static void sci_phy_suspend(struct isci_phy *iphy) { u32 scu_sas_pcfg_value; @@ -386,12 +345,10 @@ static void scic_sds_phy_suspend( writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); - scic_sds_phy_setup_transport( - iphy, - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); + sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); } -void scic_sds_phy_resume(struct isci_phy *iphy) +void sci_phy_resume(struct isci_phy *iphy) { u32 scu_sas_pcfg_value; @@ -402,34 +359,28 @@ void scic_sds_phy_resume(struct isci_phy *iphy) &iphy->link_layer_registers->phy_configuration); } -void scic_sds_phy_get_sas_address(struct isci_phy *iphy, - struct sci_sas_address *sas_address) +void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) { - sas_address->high = readl(&iphy->link_layer_registers->source_sas_address_high); - sas_address->low = readl(&iphy->link_layer_registers->source_sas_address_low); + sas->high = readl(&iphy->link_layer_registers->source_sas_address_high); + sas->low = readl(&iphy->link_layer_registers->source_sas_address_low); } -void scic_sds_phy_get_attached_sas_address(struct isci_phy *iphy, - struct sci_sas_address *sas_address) +void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) { struct sas_identify_frame *iaf; iaf = &iphy->frame_rcvd.iaf; - memcpy(sas_address, iaf->sas_addr, SAS_ADDR_SIZE); + memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE); } -void scic_sds_phy_get_protocols(struct isci_phy *iphy, - struct scic_phy_proto *protocols) +void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto) { - protocols->all = - (u16)(readl(&iphy-> - link_layer_registers->transmit_identification) & - 0x0000FFFF); + proto->all = readl(&iphy->link_layer_registers->transmit_identification); } -enum sci_status scic_sds_phy_start(struct isci_phy *iphy) +enum sci_status sci_phy_start(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; if (state != SCI_PHY_STOPPED) { dev_dbg(sciphy_to_dev(iphy), @@ -441,9 +392,9 @@ enum sci_status scic_sds_phy_start(struct isci_phy *iphy) return SCI_SUCCESS; } -enum sci_status scic_sds_phy_stop(struct isci_phy *iphy) +enum sci_status sci_phy_stop(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_INITIAL: @@ -467,9 +418,9 @@ enum sci_status scic_sds_phy_stop(struct isci_phy *iphy) return SCI_SUCCESS; } -enum sci_status scic_sds_phy_reset(struct isci_phy *iphy) +enum sci_status sci_phy_reset(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; if (state != SCI_PHY_READY) { dev_dbg(sciphy_to_dev(iphy), @@ -481,9 +432,9 @@ enum sci_status scic_sds_phy_reset(struct isci_phy *iphy) return SCI_SUCCESS; } -enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy) +enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_AWAIT_SAS_POWER: { @@ -528,55 +479,37 @@ enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy) } } -/* - * 
***************************************************************************** - * * SCIC SDS PHY HELPER FUNCTIONS - * ***************************************************************************** */ - - -/** - * - * @sci_phy: The phy object that received SAS PHY DETECTED. - * - * This method continues the link training for the phy as if it were a SAS PHY - * instead of a SATA PHY. This is done because the completion queue had a SAS - * PHY DETECTED event when the state machine was expecting a SATA PHY event. - * none - */ -static void scic_sds_phy_start_sas_link_training( - struct isci_phy *iphy) +static void sci_phy_start_sas_link_training(struct isci_phy *iphy) { + /* continue the link training for the phy as if it were a SAS PHY + * instead of a SATA PHY. This is done because the completion queue had a SAS + * PHY DETECTED event when the state machine was expecting a SATA PHY event. + */ u32 phy_control; - phy_control = - readl(&iphy->link_layer_registers->phy_configuration); + phy_control = readl(&iphy->link_layer_registers->phy_configuration); phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); writel(phy_control, - &iphy->link_layer_registers->phy_configuration); + &iphy->link_layer_registers->phy_configuration); sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; } -/** - * - * @sci_phy: The phy object that received a SATA SPINUP HOLD event - * - * This method continues the link training for the phy as if it were a SATA PHY - * instead of a SAS PHY. This is done because the completion queue had a SATA - * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none - */ -static void scic_sds_phy_start_sata_link_training( - struct isci_phy *iphy) +static void sci_phy_start_sata_link_training(struct isci_phy *iphy) { + /* This method continues the link training for the phy as if it were a SATA PHY + * instead of a SAS PHY. This is done because the completion queue had a SATA + * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none + */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; } /** - * scic_sds_phy_complete_link_training - perform processing common to + * sci_phy_complete_link_training - perform processing common to * all protocols upon completion of link training. * @sci_phy: This parameter specifies the phy object for which link training * has completed. @@ -586,30 +519,28 @@ static void scic_sds_phy_start_sata_link_training( * sub-state machine. 
* */ -static void scic_sds_phy_complete_link_training( - struct isci_phy *iphy, - enum sas_linkrate max_link_rate, - u32 next_state) +static void sci_phy_complete_link_training(struct isci_phy *iphy, + enum sas_linkrate max_link_rate, + u32 next_state) { iphy->max_negotiated_speed = max_link_rate; sci_change_state(&iphy->sm, next_state); } -enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, - u32 event_code) +enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_AWAIT_OSSP_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); iphy->is_in_link_training = true; break; case SCU_EVENT_SATA_SPINUP_HOLD: - scic_sds_phy_start_sata_link_training(iphy); + sci_phy_start_sata_link_training(iphy); iphy->is_in_link_training = true; break; default: @@ -630,30 +561,24 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, break; case SCU_EVENT_SAS_15: case SCU_EVENT_SAS_15_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_1_5_GBPS, - SCI_PHY_SUB_AWAIT_IAF_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_30: case SCU_EVENT_SAS_30_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_3_0_GBPS, - SCI_PHY_SUB_AWAIT_IAF_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_60: case SCU_EVENT_SAS_60_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_6_0_GBPS, - SCI_PHY_SUB_AWAIT_IAF_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* * We were doing SAS PHY link training and received a SATA PHY event * continue OOB/SN as if this were a SATA PHY */ - scic_sds_phy_start_sata_link_training(iphy); + sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ @@ -673,14 +598,14 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* Backup the state machine */ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* We were doing SAS PHY link training and received a * SATA PHY event continue OOB/SN as if this were a * SATA PHY */ - scic_sds_phy_start_sata_link_training(iphy); + sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: case SCU_EVENT_LINK_FAILURE: @@ -727,7 +652,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. */ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; default: @@ -760,7 +685,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. 
*/ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; default: dev_warn(sciphy_to_dev(iphy), @@ -781,24 +706,18 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, break; case SCU_EVENT_SATA_15: case SCU_EVENT_SATA_15_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_1_5_GBPS, - SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_30: case SCU_EVENT_SATA_30_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_3_0_GBPS, - SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_60: case SCU_EVENT_SATA_60_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_6_0_GBPS, - SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ @@ -808,7 +727,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, /* * There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. */ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; default: dev_warn(sciphy_to_dev(iphy), @@ -851,7 +770,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, case SCU_EVENT_BROADCAST_CHANGE: /* Broadcast change received. Notify the port. */ if (phy_get_non_dummy_port(iphy) != NULL) - scic_sds_port_broadcast_change_received(iphy->owning_port, iphy); + sci_port_broadcast_change_received(iphy->owning_port, iphy); else iphy->bcn_received_while_port_unassigned = true; break; @@ -886,10 +805,9 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, } } -enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, - u32 frame_index) +enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; struct isci_host *ihost = iphy->owning_port->owning_controller; enum sci_status result; unsigned long flags; @@ -899,9 +817,9 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, u32 *frame_words; struct sas_identify_frame iaf; - result = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, - frame_index, - (void **)&frame_words); + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_words); if (result != SCI_SUCCESS) return result; @@ -933,15 +851,15 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, "unexpected frame id %x\n", __func__, frame_index); - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return result; } case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { struct dev_to_host_fis *frame_header; u32 *fis_frame_data; - result = scic_sds_unsolicited_frame_control_get_header( - &(scic_sds_phy_get_controller(iphy)->uf_control), + result = sci_unsolicited_frame_control_get_header( + &(sci_phy_get_controller(iphy)->uf_control), frame_index, (void **)&frame_header); @@ -950,14 +868,14 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, if ((frame_header->fis_type == FIS_REGD2H) && !(frame_header->status & ATA_BUSY)) { - 
scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, - frame_index, - (void **)&fis_frame_data); + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&fis_frame_data); spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); - scic_sds_controller_copy_sata_response(&iphy->frame_rcvd.fis, - frame_header, - fis_frame_data); + sci_controller_copy_sata_response(&iphy->frame_rcvd.fis, + frame_header, + fis_frame_data); spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); /* got IAF we can now go to the await spinup semaphore state */ @@ -971,7 +889,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, __func__, frame_index); /* Regardless of the result we are done with this frame with it */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return result; } @@ -983,7 +901,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, } -static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -991,71 +909,71 @@ static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_m sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); } -static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_insert(ihost, iphy); + sci_controller_power_control_queue_insert(ihost, iphy); } -static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_remove(ihost, iphy); + sci_controller_power_control_queue_remove(ihost, iphy); } -static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_insert(ihost, iphy); + sci_controller_power_control_queue_insert(ihost, iphy); } -static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_remove(ihost, iphy); + sci_controller_power_control_queue_remove(ihost, iphy); } -static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } -static void 
scic_sds_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } -static void scic_sds_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } -static void scic_sds_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } -static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - if (scic_sds_port_link_detected(iphy->owning_port, iphy)) { + if (sci_port_link_detected(iphy->owning_port, iphy)) { /* * Clear the PE suspend condition so we can actually @@ -1063,7 +981,7 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas * The hardware will not respond to the XRDY until the PE * suspend condition is cleared. */ - scic_sds_phy_resume(iphy); + sci_phy_resume(iphy); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SIGNATURE_FIS_TIMEOUT); @@ -1071,14 +989,14 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas iphy->is_in_link_training = false; } -static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } -static void scic_sds_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1169,7 +1087,7 @@ static void scu_link_layer_tx_hard_reset( &iphy->link_layer_registers->phy_configuration); } -static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1182,12 +1100,12 @@ static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) scu_link_layer_stop_protocol_engine(iphy); if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) - scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy), + sci_controller_link_down(sci_phy_get_controller(iphy), phy_get_non_dummy_port(iphy), iphy); } -static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1199,31 +1117,31 @@ static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) iphy->bcn_received_while_port_unassigned = false; if (iphy->sm.previous_state_id == SCI_PHY_READY) - scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy), + 
sci_controller_link_down(sci_phy_get_controller(iphy), phy_get_non_dummy_port(iphy), iphy); sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); } -static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - scic_sds_controller_link_up(scic_sds_phy_get_controller(iphy), + sci_controller_link_up(sci_phy_get_controller(iphy), phy_get_non_dummy_port(iphy), iphy); } -static void scic_sds_phy_ready_state_exit(struct sci_base_state_machine *sm) +static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - scic_sds_phy_suspend(iphy); + sci_phy_suspend(iphy); } -static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1231,7 +1149,7 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm * the resetting state we don't notify the user regarding link up and * link down notifications */ - scic_sds_port_deactivate_phy(iphy->owning_port, iphy, false); + sci_port_deactivate_phy(iphy->owning_port, iphy, false); if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { scu_link_layer_tx_hard_reset(iphy); @@ -1243,57 +1161,57 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm } } -static const struct sci_base_state scic_sds_phy_state_table[] = { +static const struct sci_base_state sci_phy_state_table[] = { [SCI_PHY_INITIAL] = { }, [SCI_PHY_STOPPED] = { - .enter_state = scic_sds_phy_stopped_state_enter, + .enter_state = sci_phy_stopped_state_enter, }, [SCI_PHY_STARTING] = { - .enter_state = scic_sds_phy_starting_state_enter, + .enter_state = sci_phy_starting_state_enter, }, [SCI_PHY_SUB_INITIAL] = { - .enter_state = scic_sds_phy_starting_initial_substate_enter, + .enter_state = sci_phy_starting_initial_substate_enter, }, [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, [SCI_PHY_SUB_AWAIT_SAS_POWER] = { - .enter_state = scic_sds_phy_starting_await_sas_power_substate_enter, - .exit_state = scic_sds_phy_starting_await_sas_power_substate_exit, + .enter_state = sci_phy_starting_await_sas_power_substate_enter, + .exit_state = sci_phy_starting_await_sas_power_substate_exit, }, [SCI_PHY_SUB_AWAIT_SATA_POWER] = { - .enter_state = scic_sds_phy_starting_await_sata_power_substate_enter, - .exit_state = scic_sds_phy_starting_await_sata_power_substate_exit + .enter_state = sci_phy_starting_await_sata_power_substate_enter, + .exit_state = sci_phy_starting_await_sata_power_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { - .enter_state = scic_sds_phy_starting_await_sata_phy_substate_enter, - .exit_state = scic_sds_phy_starting_await_sata_phy_substate_exit + .enter_state = sci_phy_starting_await_sata_phy_substate_enter, + .exit_state = sci_phy_starting_await_sata_phy_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { - .enter_state = scic_sds_phy_starting_await_sata_speed_substate_enter, - .exit_state = scic_sds_phy_starting_await_sata_speed_substate_exit + .enter_state = sci_phy_starting_await_sata_speed_substate_enter, + .exit_state = sci_phy_starting_await_sata_speed_substate_exit }, [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { - .enter_state = scic_sds_phy_starting_await_sig_fis_uf_substate_enter, - .exit_state = 
scic_sds_phy_starting_await_sig_fis_uf_substate_exit + .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter, + .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit }, [SCI_PHY_SUB_FINAL] = { - .enter_state = scic_sds_phy_starting_final_substate_enter, + .enter_state = sci_phy_starting_final_substate_enter, }, [SCI_PHY_READY] = { - .enter_state = scic_sds_phy_ready_state_enter, - .exit_state = scic_sds_phy_ready_state_exit, + .enter_state = sci_phy_ready_state_enter, + .exit_state = sci_phy_ready_state_exit, }, [SCI_PHY_RESETTING] = { - .enter_state = scic_sds_phy_resetting_state_enter, + .enter_state = sci_phy_resetting_state_enter, }, [SCI_PHY_FINAL] = { }, }; -void scic_sds_phy_construct(struct isci_phy *iphy, +void sci_phy_construct(struct isci_phy *iphy, struct isci_port *iport, u8 phy_index) { - sci_init_sm(&iphy->sm, scic_sds_phy_state_table, SCI_PHY_INITIAL); + sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL); /* Copy the rest of the input data to our locals */ iphy->owning_port = iport; @@ -1309,14 +1227,13 @@ void scic_sds_phy_construct(struct isci_phy *iphy, void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) { - union scic_oem_parameters oem; + struct sci_oem_params *oem = &ihost->oem_parameters; u64 sci_sas_addr; __be64 sas_addr; - scic_oem_parameters_get(ihost, &oem); - sci_sas_addr = oem.sds1.phys[index].sas_address.high; + sci_sas_addr = oem->phys[index].sas_address.high; sci_sas_addr <<= 32; - sci_sas_addr |= oem.sds1.phys[index].sas_address.low; + sci_sas_addr |= oem->phys[index].sas_address.low; sas_addr = cpu_to_be64(sci_sas_addr); memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); @@ -1365,14 +1282,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy, switch (func) { case PHY_FUNC_DISABLE: spin_lock_irqsave(&ihost->scic_lock, flags); - scic_sds_phy_stop(iphy); + sci_phy_stop(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; case PHY_FUNC_LINK_RESET: spin_lock_irqsave(&ihost->scic_lock, flags); - scic_sds_phy_stop(iphy); - scic_sds_phy_start(iphy); + sci_phy_stop(iphy); + sci_phy_start(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 19aa444517b4..5d2c1b4906a3 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h @@ -76,7 +76,7 @@ */ #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 -enum scic_sds_phy_protocol { +enum sci_phy_protocol { SCIC_SDS_PHY_PROTOCOL_UNKNOWN, SCIC_SDS_PHY_PROTOCOL_SAS, SCIC_SDS_PHY_PROTOCOL_SATA, @@ -95,7 +95,7 @@ struct isci_phy { struct sci_base_state_machine sm; struct isci_port *owning_port; enum sas_linkrate max_negotiated_speed; - enum scic_sds_phy_protocol protocol; + enum sci_phy_protocol protocol; u8 phy_index; bool bcn_received_while_port_unassigned; bool is_in_link_training; @@ -118,7 +118,7 @@ static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy) return iphy; } -struct scic_phy_cap { +struct sci_phy_cap { union { struct { /* @@ -147,7 +147,7 @@ struct scic_phy_cap { } __packed; /* this data structure reflects the link layer transmit identification reg */ -struct scic_phy_proto { +struct sci_phy_proto { union { struct { u16 _r_a:1; @@ -167,12 +167,12 @@ struct scic_phy_proto { /** - * struct scic_phy_properties - This structure defines the properties common to + * struct sci_phy_properties - This structure defines the properties common to * all phys that can be retrieved. 
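
isci_phy_init() above widens the two 32-bit OEM address words into a single 64-bit value and byte-swaps it before copying it into iphy->sas_addr. A standalone sketch of the same computation, with made-up OEM values:

static void example_sas_addr_pack(u8 dst[SAS_ADDR_SIZE])
{
        u32 hi = 0x5000b000, lo = 0x12345678;   /* made-up OEM words */
        u64 addr = ((u64)hi << 32) | lo;        /* 0x5000b00012345678 */
        __be64 wire = cpu_to_be64(addr);        /* first byte in memory: 0x50 */

        memcpy(dst, &wire, sizeof(wire));
}
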
* * */ -struct scic_phy_properties { +struct sci_phy_properties { /** * This field specifies the port that currently contains the * supplied phy. This field may be set to NULL @@ -194,12 +194,12 @@ struct scic_phy_properties { }; /** - * struct scic_sas_phy_properties - This structure defines the properties, + * struct sci_sas_phy_properties - This structure defines the properties, * specific to a SAS phy, that can be retrieved. * * */ -struct scic_sas_phy_properties { +struct sci_sas_phy_properties { /** * This field delineates the Identify Address Frame received * from the remote end point. @@ -210,17 +210,17 @@ struct scic_sas_phy_properties { * This field delineates the Phy capabilities structure received * from the remote end point. */ - struct scic_phy_cap rcvd_cap; + struct sci_phy_cap rcvd_cap; }; /** - * struct scic_sata_phy_properties - This structure defines the properties, + * struct sci_sata_phy_properties - This structure defines the properties, * specific to a SATA phy, that can be retrieved. * * */ -struct scic_sata_phy_properties { +struct sci_sata_phy_properties { /** * This field delineates the signature FIS received from the * attached target. @@ -236,12 +236,12 @@ struct scic_sata_phy_properties { }; /** - * enum scic_phy_counter_id - This enumeration depicts the various pieces of + * enum sci_phy_counter_id - This enumeration depicts the various pieces of * optional information that can be retrieved for a specific phy. * * */ -enum scic_phy_counter_id { +enum sci_phy_counter_id { /** * This PHY information field tracks the number of frames received. */ @@ -344,7 +344,7 @@ enum scic_phy_counter_id { SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR }; -enum scic_sds_phy_states { +enum sci_phy_states { /** * Simply the initial state for the base domain state machine. 
*/ @@ -441,77 +441,77 @@ enum scic_sds_phy_states { }; /** - * scic_sds_phy_get_index() - + * sci_phy_get_index() - * * This macro returns the phy index for the specified phy */ -#define scic_sds_phy_get_index(phy) \ +#define sci_phy_get_index(phy) \ ((phy)->phy_index) /** - * scic_sds_phy_get_controller() - This macro returns the controller for this + * sci_phy_get_controller() - This macro returns the controller for this * phy * * */ -#define scic_sds_phy_get_controller(phy) \ - (scic_sds_port_get_controller((phy)->owning_port)) +#define sci_phy_get_controller(phy) \ + (sci_port_get_controller((phy)->owning_port)) -void scic_sds_phy_construct( +void sci_phy_construct( struct isci_phy *iphy, struct isci_port *iport, u8 phy_index); struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy); -void scic_sds_phy_set_port( +void sci_phy_set_port( struct isci_phy *iphy, struct isci_port *iport); -enum sci_status scic_sds_phy_initialize( +enum sci_status sci_phy_initialize( struct isci_phy *iphy, struct scu_transport_layer_registers __iomem *transport_layer_registers, struct scu_link_layer_registers __iomem *link_layer_registers); -enum sci_status scic_sds_phy_start( +enum sci_status sci_phy_start( struct isci_phy *iphy); -enum sci_status scic_sds_phy_stop( +enum sci_status sci_phy_stop( struct isci_phy *iphy); -enum sci_status scic_sds_phy_reset( +enum sci_status sci_phy_reset( struct isci_phy *iphy); -void scic_sds_phy_resume( +void sci_phy_resume( struct isci_phy *iphy); -void scic_sds_phy_setup_transport( +void sci_phy_setup_transport( struct isci_phy *iphy, u32 device_id); -enum sci_status scic_sds_phy_event_handler( +enum sci_status sci_phy_event_handler( struct isci_phy *iphy, u32 event_code); -enum sci_status scic_sds_phy_frame_handler( +enum sci_status sci_phy_frame_handler( struct isci_phy *iphy, u32 frame_index); -enum sci_status scic_sds_phy_consume_power_handler( +enum sci_status sci_phy_consume_power_handler( struct isci_phy *iphy); -void scic_sds_phy_get_sas_address( +void sci_phy_get_sas_address( struct isci_phy *iphy, struct sci_sas_address *sas_address); -void scic_sds_phy_get_attached_sas_address( +void sci_phy_get_attached_sas_address( struct isci_phy *iphy, struct sci_sas_address *sas_address); -struct scic_phy_proto; -void scic_sds_phy_get_protocols( +struct sci_phy_proto; +void sci_phy_get_protocols( struct isci_phy *iphy, - struct scic_phy_proto *protocols); + struct sci_phy_proto *protocols); enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy); struct isci_host; diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index c434d5a0effa..1822ed68409e 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -74,57 +74,35 @@ static void isci_port_change_state(struct isci_port *iport, enum isci_status sta spin_unlock_irqrestore(&iport->state_lock, flags); } -/* - * This function will indicate which protocols are supported by this port. - * @sci_port: a handle corresponding to the SAS port for which to return the - * supported protocols. - * @protocols: This parameter specifies a pointer to a data structure - * which the core will copy the protocol values for the port from the - * transmit_identification register. 
- */ -static void -scic_sds_port_get_protocols(struct isci_port *iport, - struct scic_phy_proto *protocols) +static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) { u8 index; - protocols->all = 0; - + proto->all = 0; for (index = 0; index < SCI_MAX_PHYS; index++) { - if (iport->phy_table[index] != NULL) { - scic_sds_phy_get_protocols(iport->phy_table[index], - protocols); - } + struct isci_phy *iphy = iport->phy_table[index]; + + if (!iphy) + continue; + sci_phy_get_protocols(iphy, proto); } } -/** - * This method requests a list (mask) of the phys contained in the supplied SAS - * port. - * @sci_port: a handle corresponding to the SAS port for which to return the - * phy mask. - * - * Return a bit mask indicating which phys are a part of this port. Each bit - * corresponds to a phy identifier (e.g. bit 0 = phy id 0). - */ -static u32 scic_sds_port_get_phys(struct isci_port *iport) +static u32 sci_port_get_phys(struct isci_port *iport) { u32 index; u32 mask; mask = 0; - - for (index = 0; index < SCI_MAX_PHYS; index++) { - if (iport->phy_table[index] != NULL) { + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) mask |= (1 << index); - } - } return mask; } /** - * scic_port_get_properties() - This method simply returns the properties + * sci_port_get_properties() - This method simply returns the properties * regarding the port, such as: physical index, protocols, sas address, etc. * @port: this parameter specifies the port for which to retrieve the physical * index. @@ -136,22 +114,22 @@ static u32 scic_sds_port_get_phys(struct isci_port *iport) * value is returned if the specified port is not valid. When this value is * returned, no data is copied to the properties output parameter. */ -static enum sci_status scic_port_get_properties(struct isci_port *iport, - struct scic_port_properties *prop) +static enum sci_status sci_port_get_properties(struct isci_port *iport, + struct sci_port_properties *prop) { if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) return SCI_FAILURE_INVALID_PORT; - prop->index = iport->logical_port_index; - prop->phy_mask = scic_sds_port_get_phys(iport); - scic_sds_port_get_sas_address(iport, &prop->local.sas_address); - scic_sds_port_get_protocols(iport, &prop->local.protocols); - scic_sds_port_get_attached_sas_address(iport, &prop->remote.sas_address); + prop->index = iport->logical_port_index; + prop->phy_mask = sci_port_get_phys(iport); + sci_port_get_sas_address(iport, &prop->local.sas_address); + sci_port_get_protocols(iport, &prop->local.protocols); + sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); return SCI_SUCCESS; } -static void scic_port_bcn_enable(struct isci_port *iport) +static void sci_port_bcn_enable(struct isci_port *iport) { struct isci_phy *iphy; u32 val; @@ -167,7 +145,7 @@ static void scic_port_bcn_enable(struct isci_port *iport) } } -/* called under scic_lock to stabilize phy:port associations */ +/* called under sci_lock to stabilize phy:port associations */ void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport) { int i; @@ -209,7 +187,7 @@ static void isci_port_bc_change_received(struct isci_host *ihost, ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD); } - scic_port_bcn_enable(iport); + sci_port_bcn_enable(iport); } static void isci_port_link_up(struct isci_host *isci_host, @@ -217,7 +195,7 @@ static void isci_port_link_up(struct isci_host *isci_host, struct isci_phy *iphy) { unsigned long flags; - struct 
scic_port_properties properties; + struct sci_port_properties properties; unsigned long success = true; BUG_ON(iphy->isci_port != NULL); @@ -232,7 +210,7 @@ static void isci_port_link_up(struct isci_host *isci_host, isci_port_change_state(iphy->isci_port, isci_starting); - scic_port_get_properties(iport, &properties); + sci_port_get_properties(iport, &properties); if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { u64 attached_sas_address; @@ -245,7 +223,7 @@ static void isci_port_link_up(struct isci_host *isci_host, * automagically assign a SAS address to the end device * for the purpose of creating a port. This SAS address * will not be the same as assigned to the PHY and needs - * to be obtained from struct scic_port_properties properties. + * to be obtained from struct sci_port_properties properties. */ attached_sas_address = properties.remote.sas_address.high; attached_sas_address <<= 32; @@ -399,50 +377,40 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port, * doesn't preclude all configurations. It merely ensures that a phy is part * of the allowable set of phy identifiers for that port. For example, one * could assign phy 3 to port 0 and no other phys. Please refer to - * scic_sds_port_is_phy_mask_valid() for information regarding whether the + * sci_port_is_phy_mask_valid() for information regarding whether the * phy_mask for a port can be supported. bool true if this is a valid phy * assignment for the port false if this is not a valid phy assignment for the * port */ -bool scic_sds_port_is_valid_phy_assignment(struct isci_port *iport, - u32 phy_index) +bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) { + struct isci_host *ihost = iport->owning_controller; + struct sci_user_parameters *user = &ihost->user_parameters; + /* Initialize to invalid value. */ u32 existing_phy_index = SCI_MAX_PHYS; u32 index; - if ((iport->physical_port_index == 1) && (phy_index != 1)) { + if ((iport->physical_port_index == 1) && (phy_index != 1)) return false; - } - if (iport->physical_port_index == 3 && phy_index != 3) { + if (iport->physical_port_index == 3 && phy_index != 3) return false; - } - if ( - (iport->physical_port_index == 2) - && ((phy_index == 0) || (phy_index == 1)) - ) { + if (iport->physical_port_index == 2 && + (phy_index == 0 || phy_index == 1)) return false; - } - for (index = 0; index < SCI_MAX_PHYS; index++) { - if ((iport->phy_table[index] != NULL) - && (index != phy_index)) { + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index] && index != phy_index) existing_phy_index = index; - } - } - /* - * Ensure that all of the phys in the port are capable of - * operating at the same maximum link rate. */ - if ( - (existing_phy_index < SCI_MAX_PHYS) - && (iport->owning_controller->user_parameters.sds1.phys[ - phy_index].max_speed_generation != - iport->owning_controller->user_parameters.sds1.phys[ - existing_phy_index].max_speed_generation) - ) + /* Ensure that all of the phys in the port are capable of + * operating at the same maximum link rate. + */ + if (existing_phy_index < SCI_MAX_PHYS && + user->phys[phy_index].max_speed_generation != + user->phys[existing_phy_index].max_speed_generation) return false; return true; @@ -460,7 +428,7 @@ bool scic_sds_port_is_valid_phy_assignment(struct isci_port *iport, * phy mask can be supported. 
true if this is a valid phy assignment for the * port false if this is not a valid phy assignment for the port */ -static bool scic_sds_port_is_phy_mask_valid( +static bool sci_port_is_phy_mask_valid( struct isci_port *iport, u32 phy_mask) { @@ -493,10 +461,10 @@ static bool scic_sds_port_is_phy_mask_valid( * the port. Currently, the lowest order phy that is connected is returned. * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is * returned if there are no currently active (i.e. connected to a remote end - * point) phys contained in the port. All other values specify a struct scic_sds_phy + * point) phys contained in the port. All other values specify a struct sci_phy * object that is active in the port. */ -static struct isci_phy *scic_sds_port_get_a_connected_phy(struct isci_port *iport) +static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) { u32 index; struct isci_phy *iphy; @@ -506,14 +474,14 @@ static struct isci_phy *scic_sds_port_get_a_connected_phy(struct isci_port *ipor * connected to the remote end-point. */ iphy = iport->phy_table[index]; - if (iphy && scic_sds_port_active_phy(iport, iphy)) + if (iphy && sci_port_active_phy(iport, iphy)) return iphy; } return NULL; } -static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) +static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) { /* Check to see if we can add this phy to a port * that means that the phy is not part of a port and that the port does @@ -521,13 +489,13 @@ static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isc */ if (!iport->phy_table[iphy->phy_index] && !phy_get_non_dummy_port(iphy) && - scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) { + sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { /* Phy is being added in the stopped state so we are in MPC mode * make logical port index = physical port index */ iport->logical_port_index = iport->physical_port_index; iport->phy_table[iphy->phy_index] = iphy; - scic_sds_phy_set_port(iphy, iport); + sci_phy_set_port(iphy, iport); return SCI_SUCCESS; } @@ -535,8 +503,7 @@ static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isc return SCI_FAILURE; } -static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, - struct isci_phy *iphy) +static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) { /* Make sure that this phy is part of this port */ if (iport->phy_table[iphy->phy_index] == iphy && @@ -544,7 +511,7 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, struct isci_host *ihost = iport->owning_controller; /* Yep it is assigned to this port so remove it */ - scic_sds_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); + sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); iport->phy_table[iphy->phy_index] = NULL; return SCI_SUCCESS; } @@ -552,45 +519,18 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, return SCI_FAILURE; } - -/** - * This method requests the SAS address for the supplied SAS port from the SCI - * implementation. - * @sci_port: a handle corresponding to the SAS port for which to return the - * SAS address. - * @sas_address: This parameter specifies a pointer to a SAS address structure - * into which the core will copy the SAS address for the port. 
- * - */ -void scic_sds_port_get_sas_address( - struct isci_port *iport, - struct sci_sas_address *sas_address) +void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) { u32 index; - sas_address->high = 0; - sas_address->low = 0; - - for (index = 0; index < SCI_MAX_PHYS; index++) { - if (iport->phy_table[index] != NULL) { - scic_sds_phy_get_sas_address(iport->phy_table[index], sas_address); - } - } + sas->high = 0; + sas->low = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) + sci_phy_get_sas_address(iport->phy_table[index], sas); } -/* - * This function requests the SAS address for the device directly attached to - * this SAS port. - * @sci_port: a handle corresponding to the SAS port for which to return the - * SAS address. - * @sas_address: This parameter specifies a pointer to a SAS address structure - * into which the core will copy the SAS address for the device directly - * attached to the port. - * - */ -void scic_sds_port_get_attached_sas_address( - struct isci_port *iport, - struct sci_sas_address *sas_address) +void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) { struct isci_phy *iphy; @@ -598,23 +538,22 @@ void scic_sds_port_get_attached_sas_address( * Ensure that the phy is both part of the port and currently * connected to the remote end-point. */ - iphy = scic_sds_port_get_a_connected_phy(iport); + iphy = sci_port_get_a_connected_phy(iport); if (iphy) { if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { - scic_sds_phy_get_attached_sas_address(iphy, - sas_address); + sci_phy_get_attached_sas_address(iphy, sas); } else { - scic_sds_phy_get_sas_address(iphy, sas_address); - sas_address->low += iphy->phy_index; + sci_phy_get_sas_address(iphy, sas); + sas->low += iphy->phy_index; } } else { - sas_address->high = 0; - sas_address->low = 0; + sas->high = 0; + sas->low = 0; } } /** - * scic_sds_port_construct_dummy_rnc() - create dummy rnc for si workaround + * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround * * @sci_port: logical port on which we need to create the remote node context * @rni: remote node index for this remote node context. @@ -623,7 +562,7 @@ void scic_sds_port_get_attached_sas_address( * This structure will be posted to the hardware to work around a scheduler * error in the hardware. */ -static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) +static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) { union scu_remote_node_context *rnc; @@ -651,7 +590,7 @@ static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) * structure will be posted to the hardwre to work around a scheduler error * in the hardware. 
*/ -static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) +static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) { struct isci_host *ihost = iport->owning_controller; struct scu_task_context *task_context; @@ -671,7 +610,7 @@ static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) task_context->task_phase = 0x01; } -static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) +static void sci_port_destroy_dummy_resources(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; @@ -679,93 +618,43 @@ static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) isci_free_tag(ihost, iport->reserved_tag); if (iport->reserved_rni != SCU_DUMMY_INDEX) - scic_sds_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, + sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, 1, iport->reserved_rni); iport->reserved_rni = SCU_DUMMY_INDEX; iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; } -/** - * This method performs initialization of the supplied port. Initialization - * includes: - state machine initialization - member variable initialization - * - configuring the phy_mask - * @sci_port: - * @transport_layer_registers: - * @port_task_scheduler_registers: - * @port_configuration_regsiter: - * - * enum sci_status SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION This value is returned - * if the phy being added to the port - */ -enum sci_status scic_sds_port_initialize( - struct isci_port *iport, - void __iomem *port_task_scheduler_registers, - void __iomem *port_configuration_regsiter, - void __iomem *viit_registers) -{ - iport->port_task_scheduler_registers = port_task_scheduler_registers; - iport->port_pe_configuration_register = port_configuration_regsiter; - iport->viit_registers = viit_registers; - - return SCI_SUCCESS; -} - - -/** - * This method assigns the direct attached device ID for this port. - * - * @param[in] iport The port for which the direct attached device id is to - * be assigned. - * @param[in] device_id The direct attached device ID to assign to the port. - * This will be the RNi for the device - */ -void scic_sds_port_setup_transports( - struct isci_port *iport, - u32 device_id) +void sci_port_setup_transports(struct isci_port *iport, u32 device_id) { u8 index; for (index = 0; index < SCI_MAX_PHYS; index++) { if (iport->active_phy_mask & (1 << index)) - scic_sds_phy_setup_transport(iport->phy_table[index], device_id); + sci_phy_setup_transport(iport->phy_table[index], device_id); } } -/** - * - * @sci_port: This is the port on which the phy should be enabled. - * @sci_phy: This is the specific phy which to enable. - * @do_notify_user: This parameter specifies whether to inform the user (via - * scic_cb_port_link_up()) as to the fact that a new phy as become ready. - * - * This function will activate the phy in the port. - * Activation includes: - adding - * the phy to the port - enabling the Protocol Engine in the silicon. - - * notifying the user that the link is up. 
none - */ -static void scic_sds_port_activate_phy(struct isci_port *iport, - struct isci_phy *iphy, - bool do_notify_user) +static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, + bool do_notify_user) { struct isci_host *ihost = iport->owning_controller; if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) - scic_sds_phy_resume(iphy); + sci_phy_resume(iphy); iport->active_phy_mask |= 1 << iphy->phy_index; - scic_sds_controller_clear_invalid_phy(ihost, iphy); + sci_controller_clear_invalid_phy(ihost, iphy); if (do_notify_user == true) isci_port_link_up(ihost, iport, iphy); } -void scic_sds_port_deactivate_phy(struct isci_port *iport, - struct isci_phy *iphy, - bool do_notify_user) +void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, + bool do_notify_user) { - struct isci_host *ihost = scic_sds_port_get_controller(iport); + struct isci_host *ihost = sci_port_get_controller(iport); iport->active_phy_mask &= ~(1 << iphy->phy_index); @@ -779,16 +668,7 @@ void scic_sds_port_deactivate_phy(struct isci_port *iport, isci_port_link_down(ihost, iphy, iport); } -/** - * - * @sci_port: This is the port on which the phy should be disabled. - * @sci_phy: This is the specific phy which to disabled. - * - * This function will disable the phy and report that the phy is not valid for - * this port object. None - */ -static void scic_sds_port_invalid_link_up(struct isci_port *iport, - struct isci_phy *iphy) +static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) { struct isci_host *ihost = iport->owning_controller; @@ -798,12 +678,12 @@ static void scic_sds_port_invalid_link_up(struct isci_port *iport, * invalid link. */ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { - scic_sds_controller_set_invalid_phy(ihost, iphy); + sci_controller_set_invalid_phy(ihost, iphy); dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); } } -static bool is_port_ready_state(enum scic_sds_port_states state) +static bool is_port_ready_state(enum sci_port_states state) { switch (state) { case SCI_PORT_READY: @@ -818,10 +698,10 @@ static bool is_port_ready_state(enum scic_sds_port_states state) /* flag dummy rnc hanling when exiting a ready state */ static void port_state_machine_change(struct isci_port *iport, - enum scic_sds_port_states state) + enum sci_port_states state) { struct sci_base_state_machine *sm = &iport->sm; - enum scic_sds_port_states old_state = sm->current_state_id; + enum sci_port_states old_state = sm->current_state_id; if (is_port_ready_state(old_state) && !is_port_ready_state(state)) iport->ready_exit = true; @@ -831,11 +711,11 @@ static void port_state_machine_change(struct isci_port *iport, } /** - * scic_sds_port_general_link_up_handler - phy can be assigned to port? - * @sci_port: scic_sds_port object for which has a phy that has gone link up. + * sci_port_general_link_up_handler - phy can be assigned to port? + * @sci_port: sci_port object for which has a phy that has gone link up. * @sci_phy: This is the struct isci_phy object that has gone link up. * @do_notify_user: This parameter specifies whether to inform the user (via - * scic_cb_port_link_up()) as to the fact that a new phy as become ready. + * sci_port_link_up()) as to the fact that a new phy as become ready. * * Determine if this phy can be assigned to this * port . 
If the phy is not a valid PHY for @@ -843,15 +723,15 @@ static void port_state_machine_change(struct isci_port *iport, * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in * the same port. none */ -static void scic_sds_port_general_link_up_handler(struct isci_port *iport, +static void sci_port_general_link_up_handler(struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user) { struct sci_sas_address port_sas_address; struct sci_sas_address phy_sas_address; - scic_sds_port_get_attached_sas_address(iport, &port_sas_address); - scic_sds_phy_get_attached_sas_address(iphy, &phy_sas_address); + sci_port_get_attached_sas_address(iport, &port_sas_address); + sci_phy_get_attached_sas_address(iphy, &phy_sas_address); /* If the SAS address of the new phy matches the SAS address of * other phys in the port OR this is the first phy in the port, @@ -863,11 +743,11 @@ static void scic_sds_port_general_link_up_handler(struct isci_port *iport, iport->active_phy_mask == 0) { struct sci_base_state_machine *sm = &iport->sm; - scic_sds_port_activate_phy(iport, iphy, do_notify_user); + sci_port_activate_phy(iport, iphy, do_notify_user); if (sm->current_state_id == SCI_PORT_RESETTING) port_state_machine_change(iport, SCI_PORT_READY); } else - scic_sds_port_invalid_link_up(iport, iphy); + sci_port_invalid_link_up(iport, iphy); } @@ -881,7 +761,7 @@ static void scic_sds_port_general_link_up_handler(struct isci_port *iport, * bool true Is returned if this is a wide ported port. false Is returned if * this is a narrow port. */ -static bool scic_sds_port_is_wide(struct isci_port *iport) +static bool sci_port_is_wide(struct isci_port *iport) { u32 index; u32 phy_count = 0; @@ -909,14 +789,14 @@ static bool scic_sds_port_is_wide(struct isci_port *iport) * wide ports and direct attached phys. Since there are no wide ported SATA * devices this could become an invalid port configuration. */ -bool scic_sds_port_link_detected( +bool sci_port_link_detected( struct isci_port *iport, struct isci_phy *iphy) { if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && - scic_sds_port_is_wide(iport)) { - scic_sds_port_invalid_link_up(iport, iphy); + sci_port_is_wide(iport)) { + sci_port_invalid_link_up(iport, iphy); return false; } @@ -977,11 +857,11 @@ done: * * */ -static void scic_sds_port_update_viit_entry(struct isci_port *iport) +static void sci_port_update_viit_entry(struct isci_port *iport) { struct sci_sas_address sas_address; - scic_sds_port_get_sas_address(iport, &sas_address); + sci_port_get_sas_address(iport, &sas_address); writel(sas_address.high, &iport->viit_registers->initiator_sas_address_hi); @@ -999,7 +879,7 @@ static void scic_sds_port_update_viit_entry(struct isci_port *iport) &iport->viit_registers->status); } -enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) +enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport) { u16 index; struct isci_phy *iphy; @@ -1010,7 +890,7 @@ enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) * lowest maximum link rate. 
*/ for (index = 0; index < SCI_MAX_PHYS; index++) { iphy = iport->phy_table[index]; - if (iphy && scic_sds_port_active_phy(iport, iphy) && + if (iphy && sci_port_active_phy(iport, iphy) && iphy->max_negotiated_speed < max_allowed_speed) max_allowed_speed = iphy->max_negotiated_speed; } @@ -1018,7 +898,7 @@ enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) return max_allowed_speed; } -static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) +static void sci_port_suspend_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; @@ -1028,7 +908,7 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) } /** - * scic_sds_port_post_dummy_request() - post dummy/workaround request + * sci_port_post_dummy_request() - post dummy/workaround request * @sci_port: port to post task * * Prevent the hardware scheduler from posting new requests to the front @@ -1036,7 +916,7 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) * ongoing requests. * */ -static void scic_sds_port_post_dummy_request(struct isci_port *iport) +static void sci_port_post_dummy_request(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; @@ -1050,7 +930,7 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } /** @@ -1060,7 +940,7 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) * @sci_port: The port on which the task must be aborted. * */ -static void scic_sds_port_abort_dummy_request(struct isci_port *iport) +static void sci_port_abort_dummy_request(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; @@ -1074,7 +954,7 @@ static void scic_sds_port_abort_dummy_request(struct isci_port *iport) iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } /** @@ -1084,7 +964,7 @@ static void scic_sds_port_abort_dummy_request(struct isci_port *iport) * This method will resume the port task scheduler for this port object. 
none */ static void -scic_sds_port_resume_port_task_scheduler(struct isci_port *iport) +sci_port_resume_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; @@ -1093,11 +973,11 @@ scic_sds_port_resume_port_task_scheduler(struct isci_port *iport) writel(pts_control_value, &iport->port_task_scheduler_registers->control); } -static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); - scic_sds_port_suspend_port_task_scheduler(iport); + sci_port_suspend_port_task_scheduler(iport); iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; @@ -1108,7 +988,7 @@ static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_mac } } -static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) { u32 index; struct isci_port *iport = container_of(sm, typeof(*iport), sm); @@ -1124,18 +1004,18 @@ static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state } } - scic_sds_port_update_viit_entry(iport); + sci_port_update_viit_entry(iport); - scic_sds_port_resume_port_task_scheduler(iport); + sci_port_resume_port_task_scheduler(iport); /* * Post the dummy task for the port so the hardware can schedule * io correctly */ - scic_sds_port_post_dummy_request(iport); + sci_port_post_dummy_request(iport); } -static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) +static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; @@ -1157,7 +1037,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } /** @@ -1168,7 +1048,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports * the port not ready and suspends the port task scheduler. none */ -static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; @@ -1178,15 +1058,15 @@ static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_ * the hardware will treat this as a NOP and just return abort * complete. 
*/ - scic_sds_port_abort_dummy_request(iport); + sci_port_abort_dummy_request(iport); isci_port_not_ready(ihost, iport); if (iport->ready_exit) - scic_sds_port_invalidate_dummy_remote_node(iport); + sci_port_invalidate_dummy_remote_node(iport); } -static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; @@ -1201,20 +1081,20 @@ static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state SCI_PORT_SUB_OPERATIONAL); } -static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); - scic_sds_port_suspend_port_task_scheduler(iport); + sci_port_suspend_port_task_scheduler(iport); if (iport->ready_exit) - scic_sds_port_invalidate_dummy_remote_node(iport); + sci_port_invalidate_dummy_remote_node(iport); } -enum sci_status scic_sds_port_start(struct isci_port *iport) +enum sci_status sci_port_start(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; enum sci_status status = SCI_SUCCESS; - enum scic_sds_port_states state; + enum sci_port_states state; u32 phy_mask; state = iport->sm.current_state_id; @@ -1234,11 +1114,11 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) } if (iport->reserved_rni == SCU_DUMMY_INDEX) { - u16 rni = scic_sds_remote_node_table_allocate_remote_node( + u16 rni = sci_remote_node_table_allocate_remote_node( &ihost->available_remote_nodes, 1); if (rni != SCU_DUMMY_INDEX) - scic_sds_port_construct_dummy_rnc(iport, rni); + sci_port_construct_dummy_rnc(iport, rni); else status = SCI_FAILURE_INSUFFICIENT_RESOURCES; iport->reserved_rni = rni; @@ -1251,19 +1131,19 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) if (tag == SCI_CONTROLLER_INVALID_IO_TAG) status = SCI_FAILURE_INSUFFICIENT_RESOURCES; else - scic_sds_port_construct_dummy_task(iport, tag); + sci_port_construct_dummy_task(iport, tag); iport->reserved_tag = tag; } if (status == SCI_SUCCESS) { - phy_mask = scic_sds_port_get_phys(iport); + phy_mask = sci_port_get_phys(iport); /* * There are one or more phys assigned to this port. Make sure * the port's phy mask is in fact legal and supported by the * silicon. 
*/ - if (scic_sds_port_is_phy_mask_valid(iport, phy_mask) == true) { + if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) { port_state_machine_change(iport, SCI_PORT_READY); @@ -1273,14 +1153,14 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) } if (status != SCI_SUCCESS) - scic_sds_port_destroy_dummy_resources(iport); + sci_port_destroy_dummy_resources(iport); return status; } -enum sci_status scic_sds_port_stop(struct isci_port *iport) +enum sci_status sci_port_stop(struct isci_port *iport) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1300,11 +1180,11 @@ enum sci_status scic_sds_port_stop(struct isci_port *iport) } } -static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout) +static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) { enum sci_status status = SCI_FAILURE_INVALID_PHY; struct isci_phy *iphy = NULL; - enum scic_sds_port_states state; + enum sci_port_states state; u32 phy_index; state = iport->sm.current_state_id; @@ -1317,7 +1197,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout /* Select a phy on which we can send the hard reset request. */ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { iphy = iport->phy_table[phy_index]; - if (iphy && !scic_sds_port_active_phy(iport, iphy)) { + if (iphy && !sci_port_active_phy(iport, iphy)) { /* * We found a phy but it is not ready select * different phy @@ -1329,7 +1209,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout /* If we have a phy then go ahead and start the reset procedure */ if (!iphy) return status; - status = scic_sds_phy_reset(iphy); + status = sci_phy_reset(iphy); if (status != SCI_SUCCESS) return status; @@ -1342,7 +1222,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout } /** - * scic_sds_port_add_phy() - + * sci_port_add_phy() - * @sci_port: This parameter specifies the port in which the phy will be added. * @sci_phy: This parameter is the phy which is to be added to the port. * @@ -1350,11 +1230,11 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other * status is a failure to add the phy to the port. 
*/ -enum sci_status scic_sds_port_add_phy(struct isci_port *iport, +enum sci_status sci_port_add_phy(struct isci_port *iport, struct isci_phy *iphy) { enum sci_status status; - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1362,7 +1242,7 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, struct sci_sas_address port_sas_address; /* Read the port assigned SAS Address if there is one */ - scic_sds_port_get_sas_address(iport, &port_sas_address); + sci_port_get_sas_address(iport, &port_sas_address); if (port_sas_address.high != 0 && port_sas_address.low != 0) { struct sci_sas_address phy_sas_address; @@ -1370,32 +1250,32 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, /* Make sure that the PHY SAS Address matches the SAS Address * for this port */ - scic_sds_phy_get_sas_address(iphy, &phy_sas_address); + sci_phy_get_sas_address(iphy, &phy_sas_address); if (port_sas_address.high != phy_sas_address.high || port_sas_address.low != phy_sas_address.low) return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } - return scic_sds_port_set_phy(iport, iphy); + return sci_port_set_phy(iport, iphy); } case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: - status = scic_sds_port_set_phy(iport, iphy); + status = sci_port_set_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, true); iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return status; case SCI_PORT_SUB_CONFIGURING: - status = scic_sds_port_set_phy(iport, iphy); + status = sci_port_set_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, true); /* Re-enter the configuring state since this may be the last phy in * the port. @@ -1411,7 +1291,7 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, } /** - * scic_sds_port_remove_phy() - + * sci_port_remove_phy() - * @sci_port: This parameter specifies the port in which the phy will be added. * @sci_phy: This parameter is the phy which is to be added to the port. * @@ -1419,33 +1299,33 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any * other status is a failure to add the phy to the port. 
*/ -enum sci_status scic_sds_port_remove_phy(struct isci_port *iport, +enum sci_status sci_port_remove_phy(struct isci_port *iport, struct isci_phy *iphy) { enum sci_status status; - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_STOPPED: - return scic_sds_port_clear_phy(iport, iphy); + return sci_port_clear_phy(iport, iphy); case SCI_PORT_SUB_OPERATIONAL: - status = scic_sds_port_clear_phy(iport, iphy); + status = sci_port_clear_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_deactivate_phy(iport, iphy, true); + sci_port_deactivate_phy(iport, iphy, true); iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; case SCI_PORT_SUB_CONFIGURING: - status = scic_sds_port_clear_phy(iport, iphy); + status = sci_port_clear_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_deactivate_phy(iport, iphy, true); + sci_port_deactivate_phy(iport, iphy, true); /* Re-enter the configuring state since this may be the last phy in * the port @@ -1460,10 +1340,10 @@ enum sci_status scic_sds_port_remove_phy(struct isci_port *iport, } } -enum sci_status scic_sds_port_link_up(struct isci_port *iport, +enum sci_status sci_port_link_up(struct isci_port *iport, struct isci_phy *iphy) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1471,13 +1351,13 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, /* Since this is the first phy going link up for the port we * can just enable it and continue */ - scic_sds_port_activate_phy(iport, iphy, true); + sci_port_activate_phy(iport, iphy, true); port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); return SCI_SUCCESS; case SCI_PORT_SUB_OPERATIONAL: - scic_sds_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, true); return SCI_SUCCESS; case SCI_PORT_RESETTING: /* TODO We should make sure that the phy that has gone @@ -1494,7 +1374,7 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, /* In the resetting state we don't notify the user regarding * link up and link down notifications. */ - scic_sds_port_general_link_up_handler(iport, iphy, false); + sci_port_general_link_up_handler(iport, iphy, false); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), @@ -1503,15 +1383,15 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, } } -enum sci_status scic_sds_port_link_down(struct isci_port *iport, +enum sci_status sci_port_link_down(struct isci_port *iport, struct isci_phy *iphy) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_SUB_OPERATIONAL: - scic_sds_port_deactivate_phy(iport, iphy, true); + sci_port_deactivate_phy(iport, iphy, true); /* If there are no active phys left in the port, then * transition the port to the WAITING state until such time @@ -1524,7 +1404,7 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, case SCI_PORT_RESETTING: /* In the resetting state we don't notify the user regarding * link up and link down notifications. 
*/ - scic_sds_port_deactivate_phy(iport, iphy, false); + sci_port_deactivate_phy(iport, iphy, false); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), @@ -1533,11 +1413,11 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, } } -enum sci_status scic_sds_port_start_io(struct isci_port *iport, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_port_start_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1553,11 +1433,11 @@ enum sci_status scic_sds_port_start_io(struct isci_port *iport, } } -enum sci_status scic_sds_port_complete_io(struct isci_port *iport, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_port_complete_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1566,7 +1446,7 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; case SCI_PORT_STOPPING: - scic_sds_port_decrement_request_count(iport); + sci_port_decrement_request_count(iport); if (iport->started_request_count == 0) port_state_machine_change(iport, @@ -1577,10 +1457,10 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, case SCI_PORT_FAILED: case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: - scic_sds_port_decrement_request_count(iport); + sci_port_decrement_request_count(iport); break; case SCI_PORT_SUB_CONFIGURING: - scic_sds_port_decrement_request_count(iport); + sci_port_decrement_request_count(iport); if (iport->started_request_count == 0) { port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); @@ -1590,32 +1470,17 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, return SCI_SUCCESS; } -/** - * - * @sci_port: This is the port object which to suspend. - * - * This method will enable the SCU Port Task Scheduler for this port object but - * will leave the port task scheduler in a suspended state. none - */ -static void -scic_sds_port_enable_port_task_scheduler(struct isci_port *iport) +static void sci_port_enable_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; + /* enable the port task scheduler in a suspended state */ pts_control_value = readl(&iport->port_task_scheduler_registers->control); pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); writel(pts_control_value, &iport->port_task_scheduler_registers->control); } -/** - * - * @sci_port: This is the port object which to resume. - * - * This method will disable the SCU port task scheduler for this port object. 
- * none - */ -static void -scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) +static void sci_port_disable_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; @@ -1625,7 +1490,7 @@ scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) writel(pts_control_value, &iport->port_task_scheduler_registers->control); } -static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) +static void sci_port_post_dummy_remote_node(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; @@ -1639,7 +1504,7 @@ static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) command = SCU_CONTEXT_COMMAND_POST_RNC_32 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); /* ensure hardware has seen the post rnc command and give it * ample time to act before sending the suspend @@ -1650,10 +1515,10 @@ static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } -static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) +static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); @@ -1662,19 +1527,19 @@ static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) * If we enter this state becasuse of a request to stop * the port then we want to disable the hardwares port * task scheduler. */ - scic_sds_port_disable_port_task_scheduler(iport); + sci_port_disable_port_task_scheduler(iport); } } -static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm) +static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); /* Enable and suspend the port task scheduler */ - scic_sds_port_enable_port_task_scheduler(iport); + sci_port_enable_port_task_scheduler(iport); } -static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; @@ -1687,30 +1552,30 @@ static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) isci_port_not_ready(ihost, iport); /* Post and suspend the dummy remote node context for this port. 
*/ - scic_sds_port_post_dummy_remote_node(iport); + sci_port_post_dummy_remote_node(iport); /* Start the ready substate machine */ port_state_machine_change(iport, SCI_PORT_SUB_WAITING); } -static void scic_sds_port_resetting_state_exit(struct sci_base_state_machine *sm) +static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_del_timer(&iport->timer); } -static void scic_sds_port_stopping_state_exit(struct sci_base_state_machine *sm) +static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_del_timer(&iport->timer); - scic_sds_port_destroy_dummy_resources(iport); + sci_port_destroy_dummy_resources(iport); } -static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) +static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); @@ -1719,40 +1584,40 @@ static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) /* --------------------------------------------------------------------------- */ -static const struct sci_base_state scic_sds_port_state_table[] = { +static const struct sci_base_state sci_port_state_table[] = { [SCI_PORT_STOPPED] = { - .enter_state = scic_sds_port_stopped_state_enter, - .exit_state = scic_sds_port_stopped_state_exit + .enter_state = sci_port_stopped_state_enter, + .exit_state = sci_port_stopped_state_exit }, [SCI_PORT_STOPPING] = { - .exit_state = scic_sds_port_stopping_state_exit + .exit_state = sci_port_stopping_state_exit }, [SCI_PORT_READY] = { - .enter_state = scic_sds_port_ready_state_enter, + .enter_state = sci_port_ready_state_enter, }, [SCI_PORT_SUB_WAITING] = { - .enter_state = scic_sds_port_ready_substate_waiting_enter, + .enter_state = sci_port_ready_substate_waiting_enter, }, [SCI_PORT_SUB_OPERATIONAL] = { - .enter_state = scic_sds_port_ready_substate_operational_enter, - .exit_state = scic_sds_port_ready_substate_operational_exit + .enter_state = sci_port_ready_substate_operational_enter, + .exit_state = sci_port_ready_substate_operational_exit }, [SCI_PORT_SUB_CONFIGURING] = { - .enter_state = scic_sds_port_ready_substate_configuring_enter, - .exit_state = scic_sds_port_ready_substate_configuring_exit + .enter_state = sci_port_ready_substate_configuring_enter, + .exit_state = sci_port_ready_substate_configuring_exit }, [SCI_PORT_RESETTING] = { - .exit_state = scic_sds_port_resetting_state_exit + .exit_state = sci_port_resetting_state_exit }, [SCI_PORT_FAILED] = { - .enter_state = scic_sds_port_failed_state_enter, + .enter_state = sci_port_failed_state_enter, } }; -void scic_sds_port_construct(struct isci_port *iport, u8 index, +void sci_port_construct(struct isci_port *iport, u8 index, struct isci_host *ihost) { - sci_init_sm(&iport->sm, scic_sds_port_state_table, SCI_PORT_STOPPED); + sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED); iport->logical_port_index = SCIC_SDS_DUMMY_PORT; iport->physical_port_index = index; @@ -1798,9 +1663,7 @@ enum isci_status isci_port_get_state( return isci_port->status; } -void scic_sds_port_broadcast_change_received( - struct isci_port *iport, - struct isci_phy *iphy) +void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) { struct isci_host *ihost = iport->owning_controller; @@ -1823,7 +1686,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port 
*ipor spin_lock_irqsave(&ihost->scic_lock, flags); #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT - status = scic_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); + status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -1840,7 +1703,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor ret = TMF_RESP_FUNC_FAILED; dev_err(&ihost->pdev->dev, - "%s: iport = %p; scic_port_hard_reset call" + "%s: iport = %p; sci_port_hard_reset call" " failed 0x%x\n", __func__, iport, status); @@ -1863,8 +1726,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor if (!iphy) continue; - scic_sds_phy_stop(iphy); - scic_sds_phy_start(iphy); + sci_phy_stop(iphy); + sci_phy_start(iphy); } spin_unlock_irqrestore(&ihost->scic_lock, flags); } diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 9a9be7b47b4a..4c4ab8126d9f 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -123,7 +123,7 @@ struct isci_port { struct scu_viit_entry __iomem *viit_registers; }; -enum scic_port_not_ready_reason_code { +enum sci_port_not_ready_reason_code { SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS, SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED, SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION, @@ -132,25 +132,25 @@ enum scic_port_not_ready_reason_code { SCIC_PORT_NOT_READY_REASON_CODE_MAX }; -struct scic_port_end_point_properties { +struct sci_port_end_point_properties { struct sci_sas_address sas_address; - struct scic_phy_proto protocols; + struct sci_phy_proto protocols; }; -struct scic_port_properties { +struct sci_port_properties { u32 index; - struct scic_port_end_point_properties local; - struct scic_port_end_point_properties remote; + struct sci_port_end_point_properties local; + struct sci_port_end_point_properties remote; u32 phy_mask; }; /** - * enum scic_sds_port_states - This enumeration depicts all the states for the + * enum sci_port_states - This enumeration depicts all the states for the * common port state machine. * * */ -enum scic_sds_port_states { +enum sci_port_states { /** * This state indicates that the port has successfully been stopped. * In this state no new IO operations are permitted. 
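
Aside, not part of the patch: the renamed struct sci_port_properties above is the structure that sci_port_get_properties() fills in the port.c hunks earlier in this diff. A minimal, hypothetical caller sketch in kernel-style C, assuming only the definitions shown above (the helper name is invented, and sci_port_get_properties() is actually static in port.c, so this is illustration only):

/* Illustrative sketch, not part of this patch: shows how a caller could
 * consume the renamed sci_port_properties structure, mirroring the SATA
 * link-up path in the port.c hunks above.  sci_port_get_properties() is
 * static in port.c, so this helper (name invented) is illustration only.
 */
static void sketch_show_port_properties(struct isci_port *iport)
{
	struct sci_port_properties props;
	u64 attached;

	if (sci_port_get_properties(iport, &props) != SCI_SUCCESS)
		return;		/* invalid or dummy port */

	/* remote.sas_address comes from a connected phy; for a direct
	 * attached SATA device the low word is biased by the phy index
	 * (see sci_port_get_attached_sas_address() above).
	 */
	attached = props.remote.sas_address.high;
	attached <<= 32;
	attached |= props.remote.sas_address.low;

	pr_debug("port %u phy_mask 0x%x attached addr %016llx\n",
		 props.index, props.phy_mask, (unsigned long long)attached);
}
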
@@ -211,23 +211,23 @@ enum scic_sds_port_states { }; /** - * scic_sds_port_get_controller() - + * sci_port_get_controller() - * * Helper macro to get the owning controller of this port */ -#define scic_sds_port_get_controller(this_port) \ +#define sci_port_get_controller(this_port) \ ((this_port)->owning_controller) /** - * scic_sds_port_get_index() - + * sci_port_get_index() - * * This macro returns the physical port index for this port object */ -#define scic_sds_port_get_index(this_port) \ +#define sci_port_get_index(this_port) \ ((this_port)->physical_port_index) -static inline void scic_sds_port_decrement_request_count(struct isci_port *iport) +static inline void sci_port_decrement_request_count(struct isci_port *iport) { if (WARN_ONCE(iport->started_request_count == 0, "%s: tried to decrement started_request_count past 0!?", @@ -237,79 +237,73 @@ static inline void scic_sds_port_decrement_request_count(struct isci_port *iport iport->started_request_count--; } -#define scic_sds_port_active_phy(port, phy) \ +#define sci_port_active_phy(port, phy) \ (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0) -void scic_sds_port_construct( +void sci_port_construct( struct isci_port *iport, u8 port_index, struct isci_host *ihost); -enum sci_status scic_sds_port_initialize( - struct isci_port *iport, - void __iomem *port_task_scheduler_registers, - void __iomem *port_configuration_regsiter, - void __iomem *viit_registers); - -enum sci_status scic_sds_port_start(struct isci_port *iport); -enum sci_status scic_sds_port_stop(struct isci_port *iport); +enum sci_status sci_port_start(struct isci_port *iport); +enum sci_status sci_port_stop(struct isci_port *iport); -enum sci_status scic_sds_port_add_phy( +enum sci_status sci_port_add_phy( struct isci_port *iport, struct isci_phy *iphy); -enum sci_status scic_sds_port_remove_phy( +enum sci_status sci_port_remove_phy( struct isci_port *iport, struct isci_phy *iphy); -void scic_sds_port_setup_transports( +void sci_port_setup_transports( struct isci_port *iport, u32 device_id); void isci_port_bcn_enable(struct isci_host *, struct isci_port *); -void scic_sds_port_deactivate_phy( +void sci_port_deactivate_phy( struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user); -bool scic_sds_port_link_detected( +bool sci_port_link_detected( struct isci_port *iport, struct isci_phy *iphy); -enum sci_status scic_sds_port_link_up(struct isci_port *iport, +enum sci_status sci_port_link_up(struct isci_port *iport, struct isci_phy *iphy); -enum sci_status scic_sds_port_link_down(struct isci_port *iport, +enum sci_status sci_port_link_down(struct isci_port *iport, struct isci_phy *iphy); struct isci_request; struct isci_remote_device; -enum sci_status scic_sds_port_start_io( +enum sci_status sci_port_start_io( struct isci_port *iport, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_port_complete_io( +enum sci_status sci_port_complete_io( struct isci_port *iport, struct isci_remote_device *idev, struct isci_request *ireq); -enum sas_linkrate scic_sds_port_get_max_allowed_speed( +enum sas_linkrate sci_port_get_max_allowed_speed( struct isci_port *iport); -void scic_sds_port_broadcast_change_received( +void sci_port_broadcast_change_received( struct isci_port *iport, struct isci_phy *iphy); -bool scic_sds_port_is_valid_phy_assignment( +bool sci_port_is_valid_phy_assignment( struct isci_port *iport, u32 phy_index); -void scic_sds_port_get_sas_address( +void sci_port_get_sas_address( struct isci_port *iport, struct 
sci_sas_address *sas_address); -void scic_sds_port_get_attached_sas_address( +void sci_port_get_attached_sas_address( struct isci_port *iport, struct sci_sas_address *sas_address); diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index a0a135d54e95..c8b16db6bbde 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -112,7 +112,7 @@ static s32 sci_sas_address_compare( * port. port address if the port can be found to match the phy. * NULL if there is no matching port for the phy. */ -static struct isci_port *scic_sds_port_configuration_agent_find_port( +static struct isci_port *sci_port_configuration_agent_find_port( struct isci_host *ihost, struct isci_phy *iphy) { @@ -127,14 +127,14 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( * more phys match the sent and received SAS address as this phy in which * case it should participate in the same port. */ - scic_sds_phy_get_sas_address(iphy, &phy_sas_address); - scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address); + sci_phy_get_sas_address(iphy, &phy_sas_address); + sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address); for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; - scic_sds_port_get_sas_address(iport, &port_sas_address); - scic_sds_port_get_attached_sas_address(iport, &port_attached_device_address); + sci_port_get_sas_address(iport, &port_sas_address); + sci_port_get_attached_sas_address(iport, &port_attached_device_address); if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 && sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0) @@ -156,9 +156,9 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION * the port configuration is not valid for this port configuration agent. */ -static enum sci_status scic_sds_port_configuration_agent_validate_ports( +static enum sci_status sci_port_configuration_agent_validate_ports( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) + struct sci_port_configuration_agent *port_agent) { struct sci_sas_address first_address; struct sci_sas_address second_address; @@ -194,8 +194,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( * PE0 and PE3 can never have the same SAS Address unless they * are part of the same x4 wide port and we have already checked * for this condition. */ - scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address); - scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address); + sci_phy_get_sas_address(&ihost->phys[0], &first_address); + sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; @@ -207,8 +207,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( * part of the same port. 
*/ if (port_agent->phy_valid_port_range[0].min_index == 0 && port_agent->phy_valid_port_range[1].min_index == 1) { - scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address); - scic_sds_phy_get_sas_address(&ihost->phys[2], &second_address); + sci_phy_get_sas_address(&ihost->phys[0], &first_address); + sci_phy_get_sas_address(&ihost->phys[2], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; @@ -221,8 +221,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( * part of the same port. */ if (port_agent->phy_valid_port_range[2].min_index == 2 && port_agent->phy_valid_port_range[3].min_index == 3) { - scic_sds_phy_get_sas_address(&ihost->phys[1], &first_address); - scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address); + sci_phy_get_sas_address(&ihost->phys[1], &first_address); + sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; @@ -239,8 +239,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( /* verify all of the phys in the same port are using the same SAS address */ static enum sci_status -scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) +sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) { u32 phy_mask; u32 assigned_phy_mask; @@ -254,7 +254,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, sas_address.low = 0; for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { - phy_mask = ihost->oem_parameters.sds1.ports[port_index].phy_mask; + phy_mask = ihost->oem_parameters.ports[port_index].phy_mask; if (!phy_mask) continue; @@ -269,7 +269,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &sas_address); /* @@ -294,7 +294,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, while (phy_index < SCI_MAX_PHYS) { if ((phy_mask & (1 << phy_index)) == 0) continue; - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &phy_assigned_address); if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) { @@ -307,7 +307,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, port_agent->phy_valid_port_range[phy_index].min_index = port_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; - scic_sds_port_add_phy(&ihost->ports[port_index], + sci_port_add_phy(&ihost->ports[port_index], &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); @@ -316,14 +316,14 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, phy_index++; } - return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); + return sci_port_configuration_agent_validate_ports(ihost, port_agent); } static void mpc_agent_timeout(unsigned long data) { u8 index; struct sci_timer *tmr = (struct sci_timer *)data; - struct scic_sds_port_configuration_agent *port_agent; + struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; @@ -355,8 
+355,8 @@ done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } -static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, +static void sci_mpc_agent_link_up(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { @@ -367,10 +367,10 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, if (!iport) return; - port_agent->phy_ready_mask |= (1 << scic_sds_phy_get_index(iphy)); - scic_sds_port_link_up(iport, iphy); - if ((iport->active_phy_mask & (1 << scic_sds_phy_get_index(iphy)))) - port_agent->phy_configured_mask |= (1 << scic_sds_phy_get_index(iphy)); + port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy)); + sci_port_link_up(iport, iphy); + if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy)))) + port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy)); } /** @@ -390,9 +390,9 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, * not associated with a port there is no action taken. Is it possible to get a * link down notification from a phy that has no assocoated port? */ -static void scic_sds_mpc_agent_link_down( +static void sci_mpc_agent_link_down( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { @@ -405,9 +405,9 @@ static void scic_sds_mpc_agent_link_down( * state. */ port_agent->phy_ready_mask &= - ~(1 << scic_sds_phy_get_index(iphy)); + ~(1 << sci_phy_get_index(iphy)); port_agent->phy_configured_mask &= - ~(1 << scic_sds_phy_get_index(iphy)); + ~(1 << sci_phy_get_index(iphy)); /* * Check to see if there are more phys waiting to be @@ -424,7 +424,7 @@ static void scic_sds_mpc_agent_link_down( SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); } - scic_sds_port_link_down(iport, iphy); + sci_port_link_down(iport, iphy); } } @@ -432,8 +432,8 @@ static void scic_sds_mpc_agent_link_down( * configuration mode. */ static enum sci_status -scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) +sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) { u8 phy_index; u8 port_index; @@ -446,11 +446,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, port_index = phy_index; /* Get the assigned SAS Address for the first PHY on the controller. 
*/ - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &sas_address); while (++phy_index < SCI_MAX_PHYS) { - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &phy_assigned_address); /* Verify each of the SAS address are all the same for every PHY */ @@ -465,11 +465,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, } } - return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); + return sci_port_configuration_agent_validate_ports(ihost, port_agent); } -static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, +static void sci_apc_agent_configure_ports(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, struct isci_phy *iphy, bool start_timer) { @@ -478,10 +478,10 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, struct isci_port *iport; enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; - iport = scic_sds_port_configuration_agent_find_port(ihost, iphy); + iport = sci_port_configuration_agent_find_port(ihost, iphy); if (iport) { - if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) + if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) apc_activity = SCIC_SDS_APC_ADD_PHY; else apc_activity = SCIC_SDS_APC_SKIP_PHY; @@ -499,7 +499,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, iport = &ihost->ports[port_index]; /* First we must make sure that this PHY can be added to this Port. */ - if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) { + if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { /* * Port contains a PHY with a greater PHY ID than the current * PHY that has gone link up. This phy can not be part of any @@ -559,7 +559,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, switch (apc_activity) { case SCIC_SDS_APC_ADD_PHY: - status = scic_sds_port_add_phy(iport, iphy); + status = sci_port_add_phy(iport, iphy); if (status == SCI_SUCCESS) { port_agent->phy_configured_mask |= (1 << iphy->phy_index); @@ -588,7 +588,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, } /** - * scic_sds_apc_agent_link_up - handle apc link up events + * sci_apc_agent_link_up - handle apc link up events * @scic: This is the controller object that receives the link up * notification. * @sci_port: This is the port object associated with the phy. If the is no @@ -599,8 +599,8 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, * notifications. Is it possible to get a link down notification from a phy * that has no assocoated port? 
*/ -static void scic_sds_apc_agent_link_up(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, +static void sci_apc_agent_link_up(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { @@ -609,7 +609,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, if (!iport) { /* the phy is not the part of this port */ port_agent->phy_ready_mask |= 1 << phy_index; - scic_sds_apc_agent_configure_ports(ihost, port_agent, iphy, true); + sci_apc_agent_configure_ports(ihost, port_agent, iphy, true); } else { /* the phy is already the part of the port */ u32 port_state = iport->sm.current_state_id; @@ -620,7 +620,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, */ BUG_ON(port_state != SCI_PORT_RESETTING); port_agent->phy_ready_mask |= 1 << phy_index; - scic_sds_port_link_up(iport, iphy); + sci_port_link_up(iport, iphy); } } @@ -637,20 +637,20 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, * possible to get a link down notification from a phy that has no assocoated * port? */ -static void scic_sds_apc_agent_link_down( +static void sci_apc_agent_link_down( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { - port_agent->phy_ready_mask &= ~(1 << scic_sds_phy_get_index(iphy)); + port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy)); if (!iport) return; if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) { enum sci_status status; - status = scic_sds_port_remove_phy(iport, iphy); + status = sci_port_remove_phy(iport, iphy); if (status == SCI_SUCCESS) port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); @@ -662,7 +662,7 @@ static void apc_agent_timeout(unsigned long data) { u32 index; struct sci_timer *tmr = (struct sci_timer *)data; - struct scic_sds_port_configuration_agent *port_agent; + struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; @@ -686,7 +686,7 @@ static void apc_agent_timeout(unsigned long data) if ((configure_phy_mask & (1 << index)) == 0) continue; - scic_sds_apc_agent_configure_ports(ihost, port_agent, + sci_apc_agent_configure_ports(ihost, port_agent, &ihost->phys[index], false); } @@ -706,8 +706,8 @@ done: * call is universal for both manual port configuration and automatic port * configuration modes. 
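The apc_agent_timeout() handler above retries deferred port assignment by recomputing which phys are ready but not yet configured and calling sci_apc_agent_configure_ports() for each, with the timer restart suppressed. A rough standalone sketch of that mask walk (configure_phy() is an invented placeholder):

#include <stdio.h>

#define MAX_PHYS 4

/* stand-in for sci_apc_agent_configure_ports(..., start_timer = false) */
static void configure_phy(int index)
{
	printf("retry port assignment for phy %d\n", index);
}

static void apc_timeout(unsigned int phy_ready_mask,
			unsigned int phy_configured_mask)
{
	unsigned int configure_phy_mask = phy_ready_mask & ~phy_configured_mask;
	int index;

	for (index = 0; index < MAX_PHYS; index++) {
		if (!(configure_phy_mask & (1u << index)))
			continue;
		configure_phy(index);
	}
}

int main(void)
{
	apc_timeout(0xf, 0x5);	/* phys 1 and 3 are ready but unconfigured */
	return 0;
}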
*/ -void scic_sds_port_configuration_agent_construct( - struct scic_sds_port_configuration_agent *port_agent) +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent) { u32 index; @@ -725,29 +725,29 @@ void scic_sds_port_configuration_agent_construct( } } -enum sci_status scic_sds_port_configuration_agent_initialize( +enum sci_status sci_port_configuration_agent_initialize( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) + struct sci_port_configuration_agent *port_agent) { enum sci_status status; - enum scic_port_configuration_mode mode; + enum sci_port_configuration_mode mode; - mode = ihost->oem_parameters.sds1.controller.mode_type; + mode = ihost->oem_parameters.controller.mode_type; if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { - status = scic_sds_mpc_agent_validate_phy_configuration( + status = sci_mpc_agent_validate_phy_configuration( ihost, port_agent); - port_agent->link_up_handler = scic_sds_mpc_agent_link_up; - port_agent->link_down_handler = scic_sds_mpc_agent_link_down; + port_agent->link_up_handler = sci_mpc_agent_link_up; + port_agent->link_down_handler = sci_mpc_agent_link_down; sci_init_timer(&port_agent->timer, mpc_agent_timeout); } else { - status = scic_sds_apc_agent_validate_phy_configuration( + status = sci_apc_agent_validate_phy_configuration( ihost, port_agent); - port_agent->link_up_handler = scic_sds_apc_agent_link_up; - port_agent->link_down_handler = scic_sds_apc_agent_link_down; + port_agent->link_up_handler = sci_apc_agent_link_up; + port_agent->link_down_handler = sci_apc_agent_link_down; sci_init_timer(&port_agent->timer, apc_agent_timeout); } diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 99b13c191877..c7732fb28889 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -111,25 +111,15 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) return rom; } -/** - * isci_parse_oem_parameters() - This method will take OEM parameters - * from the module init parameters and copy them to oem_params. This will - * only copy values that are not set to the module parameter default values - * @oem_parameters: This parameter specifies the controller default OEM - * parameters. 
It is expected that this has been initialized to the default - * parameters for the controller - * - * - */ -enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem_params, +enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, struct isci_orom *orom, int scu_index) { /* check for valid inputs */ if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || - scu_index > orom->hdr.num_elements || !oem_params) + scu_index > orom->hdr.num_elements || !oem) return -EINVAL; - oem_params->sds1 = orom->ctrl[scu_index]; + *oem = orom->ctrl[scu_index]; return 0; } diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index e40cb5f6eba5..dc007e692f4e 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h @@ -74,7 +74,7 @@ #define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED /* parameters that can be set by module parameters */ -struct scic_sds_user_parameters { +struct sci_user_parameters { struct sci_phy_user_params { /** * This field specifies the NOTIFY (ENABLE SPIN UP) primitive @@ -147,30 +147,16 @@ struct scic_sds_user_parameters { }; -/* XXX kill this union */ -union scic_user_parameters { - /** - * This field specifies the user parameters specific to the - * Storage Controller Unit (SCU) Driver Standard (SDS) version - * 1. - */ - struct scic_sds_user_parameters sds1; -}; - #define SCIC_SDS_PARM_PHY_MASK_MIN 0x0 #define SCIC_SDS_PARM_PHY_MASK_MAX 0xF #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 -struct scic_sds_oem_params; -int scic_oem_parameters_validate(struct scic_sds_oem_params *oem); - -union scic_oem_parameters; -void scic_oem_parameters_get(struct isci_host *ihost, - union scic_oem_parameters *oem); +struct sci_oem_params; +int sci_oem_parameters_validate(struct sci_oem_params *oem); struct isci_orom; struct isci_orom *isci_request_oprom(struct pci_dev *pdev); -enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem, +enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, struct isci_orom *orom, int scu_index); struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); @@ -214,7 +200,7 @@ struct isci_oem_hdr { * A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs * being assigned is sufficient to declare manual PORT configuration. */ -enum scic_port_configuration_mode { +enum sci_port_configuration_mode { SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0, SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1 }; @@ -230,7 +216,7 @@ struct sci_bios_oem_param_block_hdr { uint8_t reserved[8]; } __attribute__ ((packed)); -struct scic_sds_oem_params { +struct sci_oem_params { struct { uint8_t mode_type; uint8_t max_concurrent_dev_spin_up; @@ -255,19 +241,9 @@ struct scic_sds_oem_params { } phys[SCI_MAX_PHYS]; } __attribute__ ((packed)); -/* XXX kill this union */ -union scic_oem_parameters { - /** - * This field specifies the OEM parameters specific to the - * Storage Controller Unit (SCU) Driver Standard (SDS) version - * 1. 
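With the single-member sds1 unions removed, isci_parse_oem_parameters() above validates the controller index and then copies the per-controller OEM block by plain struct assignment. A compressed illustration of that step, using a toy struct in place of sci_oem_params:

#include <stdio.h>

#define MAX_CONTROLLERS 2

struct oem_params { int mode_type; int max_dev_spin_up; };

struct orom {
	int num_elements;
	struct oem_params ctrl[MAX_CONTROLLERS];
};

/* same shape as isci_parse_oem_parameters(): validate the index,
 * then copy the whole per-controller block by struct assignment */
static int parse_oem(struct oem_params *oem, const struct orom *orom, int idx)
{
	if (idx < 0 || idx >= MAX_CONTROLLERS ||
	    idx > orom->num_elements || !oem)
		return -1;

	*oem = orom->ctrl[idx];
	return 0;
}

int main(void)
{
	struct orom rom = { .num_elements = 1,
			    .ctrl = { { 1, 4 }, { 0, 0 } } };
	struct oem_params oem;

	if (!parse_oem(&oem, &rom, 0))
		printf("mode=%d spin_up=%d\n", oem.mode_type, oem.max_dev_spin_up);
	return 0;
}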
- */ - struct scic_sds_oem_params sds1; -}; - struct isci_orom { struct sci_bios_oem_param_block_hdr hdr; - struct scic_sds_oem_params ctrl[SCI_MAX_CONTROLLERS]; + struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS]; } __attribute__ ((packed)); #endif diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 9043b458c999..8c752abb4331 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -68,7 +68,7 @@ * @isci_host: This parameter specifies the isci host object. * @isci_device: This parameter specifies the remote device * - * scic_lock is held on entrance to this function. + * sci_lock is held on entrance to this function. */ static void isci_remote_device_not_ready(struct isci_host *ihost, struct isci_remote_device *idev, u32 reason) @@ -92,7 +92,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, "%s: isci_device = %p request = %p\n", __func__, idev, ireq); - scic_controller_terminate_request(ihost, + sci_controller_terminate_request(ihost, idev, ireq); } @@ -133,7 +133,7 @@ static void rnc_destruct_done(void *_dev) sci_change_state(&idev->sm, SCI_DEV_STOPPED); } -static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev) +static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) { struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status = SCI_SUCCESS; @@ -147,7 +147,7 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem ireq->target_device != idev) continue; - s = scic_controller_terminate_request(ihost, idev, ireq); + s = sci_controller_terminate_request(ihost, idev, ireq); if (s != SCI_SUCCESS) status = s; } @@ -155,11 +155,11 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem return status; } -enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, +enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, u32 timeout) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_INITIAL: @@ -174,7 +174,7 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, case SCI_DEV_STARTING: /* device not started so there had better be no requests */ BUG_ON(idev->started_request_count != 0); - scic_sds_remote_node_context_destruct(&idev->rnc, + sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); /* Transition to the stopping state and wait for the * remote node to complete being posted and invalidated. @@ -191,28 +191,28 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, case SCI_SMP_DEV_CMD: sci_change_state(sm, SCI_DEV_STOPPING); if (idev->started_request_count == 0) { - scic_sds_remote_node_context_destruct(&idev->rnc, + sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); return SCI_SUCCESS; } else - return scic_sds_remote_device_terminate_requests(idev); + return sci_remote_device_terminate_requests(idev); break; case SCI_DEV_STOPPING: /* All requests should have been terminated, but if there is an * attempt to stop a device already in the stopping state, then * try again to terminate. 
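sci_remote_device_terminate_requests() above walks the host's request table and terminates every request still bound to the device, folding any individual failure into the returned status. A simplified standalone version of that loop (the active flag and terminate() helper are stand-ins for the driver's request flags and sci_controller_terminate_request()):

#include <stdbool.h>
#include <stdio.h>

#define MAX_REQS 8

struct request {
	bool active;
	int target_device;	/* device id instead of a pointer */
};

/* placeholder for sci_controller_terminate_request() */
static int terminate(struct request *req)
{
	req->active = false;
	return 0;	/* SCI_SUCCESS */
}

static int terminate_device_requests(struct request *reqs, int dev)
{
	int status = 0, i;

	for (i = 0; i < MAX_REQS; i++) {
		struct request *req = &reqs[i];
		int s;

		if (!req->active || req->target_device != dev)
			continue;

		s = terminate(req);
		if (s != 0)
			status = s;	/* remember the failure, keep going */
	}
	return status;
}

int main(void)
{
	struct request reqs[MAX_REQS] = { [0] = { true, 3 }, [2] = { true, 3 } };

	printf("status=%d\n", terminate_device_requests(reqs, 3));
	return 0;
}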
*/ - return scic_sds_remote_device_terminate_requests(idev); + return sci_remote_device_terminate_requests(idev); case SCI_DEV_RESETTING: sci_change_state(sm, SCI_DEV_STOPPING); return SCI_SUCCESS; } } -enum sci_status scic_remote_device_reset(struct isci_remote_device *idev) +enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_INITIAL: @@ -239,10 +239,10 @@ enum sci_status scic_remote_device_reset(struct isci_remote_device *idev) } } -enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *idev) +enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; if (state != SCI_DEV_RESETTING) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", @@ -254,11 +254,11 @@ enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *ide return SCI_SUCCESS; } -enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev, +enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, u32 suspend_type) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; if (state != SCI_STP_DEV_CMD) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", @@ -266,15 +266,15 @@ enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev, return SCI_FAILURE_INVALID_STATE; } - return scic_sds_remote_node_context_suspend(&idev->rnc, + return sci_remote_node_context_suspend(&idev->rnc, suspend_type, NULL, NULL); } -enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *idev, +enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, u32 frame_index) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status; @@ -289,7 +289,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); /* Return the frame back to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: case SCI_STP_DEV_NCQ_ERROR: @@ -302,7 +302,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * void *frame_header; ssize_t word_cnt; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); if (status != SCI_SUCCESS) @@ -311,22 +311,22 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * word_cnt = sizeof(hdr) / sizeof(u32); sci_swab32_cpy(&hdr, frame_header, word_cnt); - ireq = scic_request_by_tag(ihost, be16_to_cpu(hdr.tag)); + ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag)); if (ireq && ireq->target_device == idev) { /* The IO request is now in charge of 
releasing the frame */ - status = scic_sds_io_request_frame_handler(ireq, frame_index); + status = sci_io_request_frame_handler(ireq, frame_index); } else { /* We could not map this tag to a valid IO * request Just toss the frame and continue */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); } break; } case SCI_STP_DEV_NCQ: { struct dev_to_host_fis *hdr; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&hdr); if (status != SCI_SUCCESS) @@ -349,7 +349,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * } else status = SCI_FAILURE; - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); break; } case SCI_STP_DEV_CMD: @@ -358,7 +358,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * * in this state. All unsolicited frames are forwarded to the io request * object. */ - status = scic_sds_io_request_frame_handler(idev->working_request, frame_index); + status = sci_io_request_frame_handler(idev->working_request, frame_index); break; } @@ -369,7 +369,7 @@ static bool is_remote_device_ready(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_READY: @@ -386,25 +386,25 @@ static bool is_remote_device_ready(struct isci_remote_device *idev) } } -enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device *idev, +enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, u32 event_code) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; enum sci_status status; switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_OPS_MISC: case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: - status = scic_sds_remote_node_context_event_handler(&idev->rnc, event_code); + status = sci_remote_node_context_event_handler(&idev->rnc, event_code); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { status = SCI_SUCCESS; /* Suspend the associated RNC */ - scic_sds_remote_node_context_suspend(&idev->rnc, + sci_remote_node_context_suspend(&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); @@ -439,13 +439,13 @@ enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device * */ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) - status = scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); + status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL); } return status; } -static void scic_sds_remote_device_start_request(struct isci_remote_device *idev, +static void sci_remote_device_start_request(struct isci_remote_device *idev, struct isci_request *ireq, enum sci_status status) { @@ -453,19 +453,19 @@ static void scic_sds_remote_device_start_request(struct isci_remote_device *idev /* cleanup requests that failed after starting on the port */ if (status != SCI_SUCCESS) - scic_sds_port_complete_io(iport, idev, ireq); + sci_port_complete_io(iport, idev, ireq); else { 
kref_get(&idev->kref); - scic_sds_remote_device_increment_request_count(idev); + sci_remote_device_increment_request_count(idev); } } -enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, +enum sci_status sci_remote_device_start_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; @@ -488,15 +488,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, * successful it will start the request for the port object then * increment its own request count. */ - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); break; case SCI_STP_DEV_IDLE: { /* handle the start io operation for a sata device that is in @@ -507,18 +507,18 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, * If this is a softreset we may want to have a different * substate. */ - enum scic_sds_remote_device_states new_state; + enum sci_remote_device_states new_state; struct sas_task *task = isci_request_access_task(ireq); - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); if (status != SCI_SUCCESS) break; @@ -535,15 +535,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, struct sas_task *task = isci_request_access_task(ireq); if (task->ata_task.use_ncq) { - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); } else return SCI_FAILURE_INVALID_STATE; break; @@ -551,15 +551,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, case SCI_STP_DEV_AWAIT_RESET: return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; case SCI_SMP_DEV_IDLE: - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); if (status != SCI_SUCCESS) break; @@ -574,7 +574,7 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, return SCI_FAILURE_INVALID_STATE; } - scic_sds_remote_device_start_request(idev, ireq, status); + sci_remote_device_start_request(idev, ireq, status); return status; } @@ -584,24 +584,24 @@ static enum sci_status 
common_complete_io(struct isci_port *iport, { enum sci_status status; - status = scic_sds_request_complete(ireq); + status = sci_request_complete(ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_port_complete_io(iport, idev, ireq); + status = sci_port_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - scic_sds_remote_device_decrement_request_count(idev); + sci_remote_device_decrement_request_count(idev); return status; } -enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, +enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; @@ -636,7 +636,7 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". */ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); - } else if (scic_sds_remote_device_get_request_count(idev) == 0) + } else if (sci_remote_device_get_request_count(idev) == 0) sci_change_state(sm, SCI_STP_DEV_IDLE); break; case SCI_SMP_DEV_CMD: @@ -650,8 +650,8 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, if (status != SCI_SUCCESS) break; - if (scic_sds_remote_device_get_request_count(idev) == 0) - scic_sds_remote_node_context_destruct(&idev->rnc, + if (sci_remote_device_get_request_count(idev) == 0) + sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); break; @@ -668,21 +668,21 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, return status; } -static void scic_sds_remote_device_continue_request(void *dev) +static void sci_remote_device_continue_request(void *dev) { struct isci_remote_device *idev = dev; /* we need to check if this request is still valid to continue. */ if (idev->working_request) - scic_controller_continue_io(idev->working_request); + sci_controller_continue_io(idev->working_request); } -enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, +enum sci_status sci_remote_device_start_task(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; @@ -705,15 +705,15 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); + status = sci_remote_node_context_start_task(&idev->rnc, ireq); if (status != SCI_SUCCESS) goto out; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); if (status != SCI_SUCCESS) goto out; @@ -731,32 +731,32 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, * the correct action when the remote node context is suspended * and later resumed. 
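common_complete_io() above enforces a fixed teardown order: complete the request, let the owning port account for it, and only then decrement the device's started-request count, returning early on the first failure. The same ordering reduced to plain C with dummy status codes:

#include <stdio.h>

enum status { OK = 0, FAIL = 1 };

struct device { int started_request_count; };

static enum status request_complete(void)  { return OK; }
static enum status port_complete_io(void)  { return OK; }

/* mirrors common_complete_io(): each step must succeed before the
 * device's request count is decremented */
static enum status complete_io(struct device *dev)
{
	enum status status;

	status = request_complete();
	if (status != OK)
		return status;

	status = port_complete_io();
	if (status != OK)
		return status;

	if (dev->started_request_count > 0)
		dev->started_request_count--;
	return OK;
}

int main(void)
{
	struct device dev = { .started_request_count = 1 };

	printf("status=%d count=%d\n", complete_io(&dev),
	       dev.started_request_count);
	return 0;
}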
*/ - scic_sds_remote_node_context_suspend(&idev->rnc, + sci_remote_node_context_suspend(&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); - scic_sds_remote_node_context_resume(&idev->rnc, - scic_sds_remote_device_continue_request, + sci_remote_node_context_resume(&idev->rnc, + sci_remote_device_continue_request, idev); out: - scic_sds_remote_device_start_request(idev, ireq, status); + sci_remote_device_start_request(idev, ireq, status); /* We need to let the controller start request handler know that * it can't post TC yet. We will provide a callback function to * post TC when RNC gets resumed. */ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; case SCI_DEV_READY: - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); + status = sci_remote_node_context_start_task(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); break; } - scic_sds_remote_device_start_request(idev, ireq, status); + sci_remote_device_start_request(idev, ireq, status); return status; } @@ -769,16 +769,16 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, * This method takes the request and bulids an appropriate SCU context for the * request and then requests the controller to post the request. none */ -void scic_sds_remote_device_post_request( +void sci_remote_device_post_request( struct isci_remote_device *idev, u32 request) { u32 context; - context = scic_sds_remote_device_build_command_context(idev, request); + context = sci_remote_device_build_command_context(idev, request); - scic_sds_controller_post_request( - scic_sds_remote_device_get_controller(idev), + sci_controller_post_request( + sci_remote_device_get_controller(idev), context ); } @@ -798,7 +798,7 @@ static void remote_device_resume_done(void *_dev) sci_change_state(&idev->sm, SCI_DEV_READY); } -static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) +static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) { struct isci_remote_device *idev = _dev; struct isci_host *ihost = idev->owning_port->owning_controller; @@ -810,7 +810,7 @@ static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handl isci_remote_device_ready(ihost, idev); } -static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); @@ -819,7 +819,7 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac } /** - * scic_remote_device_destruct() - free remote node context and destruct + * sci_remote_device_destruct() - free remote node context and destruct * @remote_device: This parameter specifies the remote device to be destructed. * * Remote device objects are a limited resource. As such, they must be @@ -831,10 +831,10 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac * device isn't valid (e.g. it's already been destoryed, the handle isn't * valid, etc.). 
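The STP start_task path above parks the reset task by suspending the RNC and registering sci_remote_device_continue_request() as the resume callback, then returns a partial-success status so the controller does not post the task context yet. The callback-on-resume pattern, reduced to standalone C (all names here are invented):

#include <stdio.h>

typedef void (*resume_cb)(void *cookie);

struct rnc {
	resume_cb cb;
	void *cookie;
};

static void rnc_suspend(struct rnc *rnc)
{
	/* the hardware suspend would be requested here */
	rnc->cb = NULL;
}

static void rnc_resume(struct rnc *rnc, resume_cb cb, void *cookie)
{
	rnc->cb = cb;
	rnc->cookie = cookie;
}

/* later, when the hardware reports the node resumed */
static void rnc_resume_complete(struct rnc *rnc)
{
	if (rnc->cb)
		rnc->cb(rnc->cookie);
}

static void continue_request(void *dev)
{
	printf("continue deferred request for device %p\n", dev);
}

int main(void)
{
	struct rnc rnc = { NULL, NULL };
	int dev;

	rnc_suspend(&rnc);
	rnc_resume(&rnc, continue_request, &dev);
	rnc_resume_complete(&rnc);	/* fires the continuation */
	return 0;
}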
*/ -static enum sci_status scic_remote_device_destruct(struct isci_remote_device *idev) +static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_host *ihost; if (state != SCI_DEV_STOPPED) { @@ -844,7 +844,7 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id } ihost = idev->owning_port->owning_controller; - scic_sds_controller_free_remote_node_context(ihost, idev, + sci_controller_free_remote_node_context(ihost, idev, idev->rnc.remote_node_index); idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; sci_change_state(sm, SCI_DEV_FINAL); @@ -869,12 +869,12 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ * io requests in process */ BUG_ON(!list_empty(&idev->reqs_in_process)); - scic_remote_device_destruct(idev); + sci_remote_device_destruct(idev); list_del_init(&idev->node); isci_put_device(idev); } -static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; @@ -887,19 +887,19 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac if (prev_state == SCI_DEV_STOPPING) isci_remote_device_deconstruct(ihost, idev); - scic_sds_controller_remote_device_stopped(ihost, idev); + sci_controller_remote_device_stopped(ihost, idev); } -static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); } -static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; @@ -913,7 +913,7 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi isci_remote_device_ready(ihost, idev); } -static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) +static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; @@ -926,42 +926,42 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin } } -static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - scic_sds_remote_node_context_suspend( + sci_remote_node_context_suspend( &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); } -static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) +static void 
sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); + sci_remote_node_context_resume(&idev->rnc, NULL, NULL); } -static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); idev->working_request = NULL; - if (scic_sds_remote_node_context_is_ready(&idev->rnc)) { + if (sci_remote_node_context_is_ready(&idev->rnc)) { /* * Since the RNC is ready, it's alright to finish completion * processing (e.g. signal the remote device is ready). */ - scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); + sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); } else { - scic_sds_remote_node_context_resume(&idev->rnc, - scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler, + sci_remote_node_context_resume(&idev->rnc, + sci_stp_remote_device_ready_idle_substate_resume_complete_handler, idev); } } -static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); BUG_ON(idev->working_request == NULL); @@ -969,28 +969,28 @@ static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); } -static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) +static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) isci_remote_device_not_ready(ihost, idev, idev->not_ready_reason); } -static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); isci_remote_device_ready(ihost, idev); } -static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); BUG_ON(idev->working_request == NULL); @@ -998,83 +998,83 @@ static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); } -static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) 
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); idev->working_request = NULL; } -static const struct sci_base_state scic_sds_remote_device_state_table[] = { +static const struct sci_base_state sci_remote_device_state_table[] = { [SCI_DEV_INITIAL] = { - .enter_state = scic_sds_remote_device_initial_state_enter, + .enter_state = sci_remote_device_initial_state_enter, }, [SCI_DEV_STOPPED] = { - .enter_state = scic_sds_remote_device_stopped_state_enter, + .enter_state = sci_remote_device_stopped_state_enter, }, [SCI_DEV_STARTING] = { - .enter_state = scic_sds_remote_device_starting_state_enter, + .enter_state = sci_remote_device_starting_state_enter, }, [SCI_DEV_READY] = { - .enter_state = scic_sds_remote_device_ready_state_enter, - .exit_state = scic_sds_remote_device_ready_state_exit + .enter_state = sci_remote_device_ready_state_enter, + .exit_state = sci_remote_device_ready_state_exit }, [SCI_STP_DEV_IDLE] = { - .enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter, + .enter_state = sci_stp_remote_device_ready_idle_substate_enter, }, [SCI_STP_DEV_CMD] = { - .enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter, + .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, }, [SCI_STP_DEV_NCQ] = { }, [SCI_STP_DEV_NCQ_ERROR] = { - .enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter, + .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, }, [SCI_STP_DEV_AWAIT_RESET] = { }, [SCI_SMP_DEV_IDLE] = { - .enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter, + .enter_state = sci_smp_remote_device_ready_idle_substate_enter, }, [SCI_SMP_DEV_CMD] = { - .enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter, - .exit_state = scic_sds_smp_remote_device_ready_cmd_substate_exit, + .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, + .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, }, [SCI_DEV_STOPPING] = { }, [SCI_DEV_FAILED] = { }, [SCI_DEV_RESETTING] = { - .enter_state = scic_sds_remote_device_resetting_state_enter, - .exit_state = scic_sds_remote_device_resetting_state_exit + .enter_state = sci_remote_device_resetting_state_enter, + .exit_state = sci_remote_device_resetting_state_exit }, [SCI_DEV_FINAL] = { }, }; /** - * scic_remote_device_construct() - common construction + * sci_remote_device_construct() - common construction * @sci_port: SAS/SATA port through which this device is accessed. * @sci_dev: remote device to construct * * This routine just performs benign initialization and does not * allocate the remote_node_context which is left to - * scic_remote_device_[de]a_construct(). scic_remote_device_destruct() + * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() * frees the remote_node_context(s) for the device. */ -static void scic_remote_device_construct(struct isci_port *iport, +static void sci_remote_device_construct(struct isci_port *iport, struct isci_remote_device *idev) { idev->owning_port = iport; idev->started_request_count = 0; - sci_init_sm(&idev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL); + sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); - scic_sds_remote_node_context_construct(&idev->rnc, + sci_remote_node_context_construct(&idev->rnc, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); } /** - * scic_remote_device_da_construct() - construct direct attached device. 
+ * sci_remote_device_da_construct() - construct direct attached device. * * The information (e.g. IAF, Signature FIS, etc.) necessary to build * the device is known to the SCI Core since it is contained in the - * scic_phy object. Remote node context(s) is/are a global resource - * allocated by this routine, freed by scic_remote_device_destruct(). + * sci_phy object. Remote node context(s) is/are a global resource + * allocated by this routine, freed by sci_remote_device_destruct(). * * Returns: * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. @@ -1082,20 +1082,20 @@ static void scic_remote_device_construct(struct isci_port *iport, * sata-only controller instance. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. */ -static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, +static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, struct isci_remote_device *idev) { enum sci_status status; struct domain_device *dev = idev->domain_dev; - scic_remote_device_construct(iport, idev); + sci_remote_device_construct(iport, idev); /* * This information is request to determine how many remote node context * entries will be needed to store the remote node. */ idev->is_direct_attached = true; - status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, + status = sci_controller_allocate_remote_node_context(iport->owning_controller, idev, &idev->rnc.remote_node_index); @@ -1108,7 +1108,7 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, else return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - idev->connection_rate = scic_sds_port_get_max_allowed_speed(iport); + idev->connection_rate = sci_port_get_max_allowed_speed(iport); /* / @todo Should I assign the port width by reading all of the phys on the port? */ idev->device_port_width = 1; @@ -1117,10 +1117,10 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, } /** - * scic_remote_device_ea_construct() - construct expander attached device + * sci_remote_device_ea_construct() - construct expander attached device * * Remote node context(s) is/are a global resource allocated by this - * routine, freed by scic_remote_device_destruct(). + * routine, freed by sci_remote_device_destruct(). * * Returns: * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. @@ -1128,15 +1128,15 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, * sata-only controller instance. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. */ -static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, +static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, struct isci_remote_device *idev) { struct domain_device *dev = idev->domain_dev; enum sci_status status; - scic_remote_device_construct(iport, idev); + sci_remote_device_construct(iport, idev); - status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, + status = sci_controller_allocate_remote_node_context(iport->owning_controller, idev, &idev->rnc.remote_node_index); if (status != SCI_SUCCESS) @@ -1155,7 +1155,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, * connection the logical link rate is that same as the * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay * one another, so this code works for both situations. 
*/ - idev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(iport), + idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), dev->linkrate); /* / @todo Should I assign the port width by reading all of the phys on the port? */ @@ -1165,7 +1165,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, } /** - * scic_remote_device_start() - This method will start the supplied remote + * sci_remote_device_start() - This method will start the supplied remote * device. This method enables normal IO requests to flow through to the * remote device. * @remote_device: This parameter specifies the device to be started. @@ -1177,11 +1177,11 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start * the device when there have been no phys added to it. */ -static enum sci_status scic_remote_device_start(struct isci_remote_device *idev, +static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, u32 timeout) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; enum sci_status status; if (state != SCI_DEV_STOPPED) { @@ -1190,7 +1190,7 @@ static enum sci_status scic_remote_device_start(struct isci_remote_device *idev, return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_node_context_resume(&idev->rnc, + status = sci_remote_node_context_resume(&idev->rnc, remote_device_resume_done, idev); if (status != SCI_SUCCESS) @@ -1209,9 +1209,9 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, enum sci_status status; if (dev->parent && dev_is_expander(dev->parent)) - status = scic_remote_device_ea_construct(iport, idev); + status = sci_remote_device_ea_construct(iport, idev); else - status = scic_remote_device_da_construct(iport, idev); + status = sci_remote_device_da_construct(iport, idev); if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", @@ -1221,7 +1221,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, } /* start the device. */ - status = scic_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); + status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); if (status != SCI_SUCCESS) dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", @@ -1322,7 +1322,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem set_bit(IDEV_STOP_PENDING, &idev->flags); spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_stop(idev, 50); + status = sci_remote_device_stop(idev, 50); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Wait for the stop complete callback. 
*/ diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index bc4da20a13fa..fa9a0e6cc309 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -61,7 +61,7 @@ #include "remote_node_context.h" #include "port.h" -enum scic_remote_device_not_ready_reason_code { +enum sci_remote_device_not_ready_reason_code { SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, @@ -97,7 +97,7 @@ struct isci_remote_device { enum sas_linkrate connection_rate; bool is_direct_attached; struct isci_port *owning_port; - struct scic_sds_remote_node_context rnc; + struct sci_remote_node_context rnc; /* XXX unify with device reference counting and delete */ u32 started_request_count; struct isci_request *working_request; @@ -106,7 +106,7 @@ struct isci_remote_device { #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 -/* device reference routines must be called under scic_lock */ +/* device reference routines must be called under sci_lock */ static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) { struct isci_remote_device *idev = dev->lldd_dev; @@ -137,7 +137,7 @@ bool isci_device_is_reset_pending(struct isci_host *ihost, void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev); /** - * scic_remote_device_stop() - This method will stop both transmission and + * sci_remote_device_stop() - This method will stop both transmission and * reception of link activity for the supplied remote device. This method * disables normal IO requests from flowing through to the remote device. * @remote_device: This parameter specifies the device to be stopped. @@ -148,12 +148,12 @@ void isci_device_clear_reset_pending(struct isci_host *ihost, * This value is returned if the transmission and reception for the device was * successfully stopped. */ -enum sci_status scic_remote_device_stop( +enum sci_status sci_remote_device_stop( struct isci_remote_device *idev, u32 timeout); /** - * scic_remote_device_reset() - This method will reset the device making it + * sci_remote_device_reset() - This method will reset the device making it * ready for operation. This method must be called anytime the device is * reset either through a SMP phy control or a port hard reset request. * @remote_device: This parameter specifies the device to be reset. @@ -164,11 +164,11 @@ enum sci_status scic_remote_device_stop( * was accepted. SCI_SUCCESS This value is returned if the device reset is * started. */ -enum sci_status scic_remote_device_reset( +enum sci_status sci_remote_device_reset( struct isci_remote_device *idev); /** - * scic_remote_device_reset_complete() - This method informs the device object + * sci_remote_device_reset_complete() - This method informs the device object * that the reset operation is complete and the device can resume operation * again. * @remote_device: This parameter specifies the device which is to be informed @@ -177,18 +177,16 @@ enum sci_status scic_remote_device_reset( * An indication that the device is resuming operation. SCI_SUCCESS the device * is resuming operation. 
*/ -enum sci_status scic_remote_device_reset_complete( +enum sci_status sci_remote_device_reset_complete( struct isci_remote_device *idev); -#define scic_remote_device_is_atapi(device_handle) false - /** - * enum scic_sds_remote_device_states - This enumeration depicts all the states + * enum sci_remote_device_states - This enumeration depicts all the states * for the common remote device state machine. * * */ -enum scic_sds_remote_device_states { +enum sci_remote_device_states { /** * Simply the initial state for the base remote device state machine. */ @@ -293,7 +291,7 @@ enum scic_sds_remote_device_states { SCI_DEV_FINAL, }; -static inline struct isci_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc) +static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) { struct isci_remote_device *idev; @@ -308,122 +306,120 @@ static inline bool dev_is_expander(struct domain_device *dev) } /** - * scic_sds_remote_device_increment_request_count() - + * sci_remote_device_increment_request_count() - * * This macro incrments the request count for this device */ -#define scic_sds_remote_device_increment_request_count(idev) \ +#define sci_remote_device_increment_request_count(idev) \ ((idev)->started_request_count++) /** - * scic_sds_remote_device_decrement_request_count() - + * sci_remote_device_decrement_request_count() - * * This macro decrements the request count for this device. This count will * never decrment past 0. */ -#define scic_sds_remote_device_decrement_request_count(idev) \ +#define sci_remote_device_decrement_request_count(idev) \ ((idev)->started_request_count > 0 ? \ (idev)->started_request_count-- : 0) /** - * scic_sds_remote_device_get_request_count() - + * sci_remote_device_get_request_count() - * * This is a helper macro to return the current device request count. 
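rnc_to_dev() above recovers the enclosing isci_remote_device from a pointer to its embedded remote node context, the usual container_of() idiom. A freestanding equivalent built on offsetof(), with toy structures:

#include <stddef.h>
#include <stdio.h>

struct rnc { int remote_node_index; };

struct remote_device {
	int id;
	struct rnc rnc;		/* embedded member */
};

/* container_of(): subtract the member offset from the member pointer */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct remote_device *rnc_to_dev(struct rnc *rnc)
{
	return container_of(rnc, struct remote_device, rnc);
}

int main(void)
{
	struct remote_device dev = { .id = 7, .rnc = { 42 } };

	printf("id=%d\n", rnc_to_dev(&dev.rnc)->id);
	return 0;
}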
*/ -#define scic_sds_remote_device_get_request_count(idev) \ +#define sci_remote_device_get_request_count(idev) \ ((idev)->started_request_count) /** - * scic_sds_remote_device_get_controller() - + * sci_remote_device_get_controller() - * * This macro returns the controller object that contains this device object */ -#define scic_sds_remote_device_get_controller(idev) \ - scic_sds_port_get_controller(scic_sds_remote_device_get_port(idev)) +#define sci_remote_device_get_controller(idev) \ + sci_port_get_controller(sci_remote_device_get_port(idev)) /** - * scic_sds_remote_device_get_port() - + * sci_remote_device_get_port() - * * This macro returns the owning port of this device */ -#define scic_sds_remote_device_get_port(idev) \ +#define sci_remote_device_get_port(idev) \ ((idev)->owning_port) /** - * scic_sds_remote_device_get_controller_peg() - + * sci_remote_device_get_controller_peg() - * * This macro returns the controllers protocol engine group */ -#define scic_sds_remote_device_get_controller_peg(idev) \ +#define sci_remote_device_get_controller_peg(idev) \ (\ - scic_sds_controller_get_protocol_engine_group(\ - scic_sds_port_get_controller(\ - scic_sds_remote_device_get_port(idev) \ + sci_controller_get_protocol_engine_group(\ + sci_port_get_controller(\ + sci_remote_device_get_port(idev) \ ) \ ) \ ) /** - * scic_sds_remote_device_get_index() - + * sci_remote_device_get_index() - * * This macro returns the remote node index for this device object */ -#define scic_sds_remote_device_get_index(idev) \ +#define sci_remote_device_get_index(idev) \ ((idev)->rnc.remote_node_index) /** - * scic_sds_remote_device_build_command_context() - + * sci_remote_device_build_command_context() - * * This macro builds a remote device context for the SCU post request operation */ -#define scic_sds_remote_device_build_command_context(device, command) \ +#define sci_remote_device_build_command_context(device, command) \ ((command) \ - | (scic_sds_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \ + | (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \ | ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \ - | (scic_sds_remote_device_get_index((device))) \ + | (sci_remote_device_get_index((device))) \ ) /** - * scic_sds_remote_device_set_working_request() - + * sci_remote_device_set_working_request() - * * This macro makes the working request assingment for the remote device * object. To clear the working request use this macro with a NULL request * object. 
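sci_remote_device_build_command_context() above packs the protocol engine group, the logical port index, and the remote node index into a single posted-command word alongside the command code. The bit packing in isolation; note that the shift values below are made up for the example and are not the SCU's real register layout:

#include <stdio.h>

/* illustrative shift values only; the real SCU_CONTEXT_COMMAND_* shifts
 * come from the hardware definition and are not reproduced here */
#define EXAMPLE_PEG_SHIFT	28
#define EXAMPLE_PORT_SHIFT	24

static unsigned int build_command_context(unsigned int command,
					  unsigned int peg,
					  unsigned int port_index,
					  unsigned int node_index)
{
	return command |
	       (peg << EXAMPLE_PEG_SHIFT) |
	       (port_index << EXAMPLE_PORT_SHIFT) |
	       node_index;
}

int main(void)
{
	printf("context=%#x\n", build_command_context(0x10000000u, 0, 2, 17));
	return 0;
}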
*/ -#define scic_sds_remote_device_set_working_request(device, request) \ +#define sci_remote_device_set_working_request(device, request) \ ((device)->working_request = (request)) -enum sci_status scic_sds_remote_device_frame_handler( +enum sci_status sci_remote_device_frame_handler( struct isci_remote_device *idev, u32 frame_index); -enum sci_status scic_sds_remote_device_event_handler( +enum sci_status sci_remote_device_event_handler( struct isci_remote_device *idev, u32 event_code); -enum sci_status scic_sds_remote_device_start_io( +enum sci_status sci_remote_device_start_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_remote_device_start_task( +enum sci_status sci_remote_device_start_task( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_remote_device_complete_io( +enum sci_status sci_remote_device_complete_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_remote_device_suspend( +enum sci_status sci_remote_device_suspend( struct isci_remote_device *idev, u32 suspend_type); -void scic_sds_remote_device_post_request( +void sci_remote_device_post_request( struct isci_remote_device *idev, u32 request); -#define scic_sds_remote_device_is_atapi(idev) false - #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 8a5203b6eb09..c2dfd5a72181 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -81,8 +81,8 @@ * otherwise it will return false bool true if the remote node context is in * the ready state. false if the remote node context is not in the ready state. */ -bool scic_sds_remote_node_context_is_ready( - struct scic_sds_remote_node_context *sci_rnc) +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc) { u32 current_state = sci_rnc->sm.current_state_id; @@ -93,15 +93,16 @@ bool scic_sds_remote_node_context_is_ready( return false; } -/** - * - * @sci_dev: The remote device to use to construct the RNC buffer. - * @rnc: The buffer into which the remote device data will be copied. - * - * This method will construct the RNC buffer for this remote device object. 
none - */ -static void scic_sds_remote_node_context_construct_buffer( - struct scic_sds_remote_node_context *sci_rnc) +static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) +{ + if (id < ihost->remote_node_entries && + ihost->device_table[id]) + return &ihost->remote_node_context_table[id]; + + return NULL; +} + +static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) { struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; @@ -110,11 +111,11 @@ static void scic_sds_remote_node_context_construct_buffer( struct isci_host *ihost; __le64 sas_addr; - ihost = scic_sds_remote_device_get_controller(idev); - rnc = scic_sds_controller_get_remote_node_context_buffer(ihost, rni); + ihost = sci_remote_device_get_controller(idev); + rnc = sci_rnc_by_id(ihost, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) - * scic_sds_remote_device_node_count(idev)); + * sci_remote_device_node_count(idev)); rnc->ssp.remote_node_index = rni; rnc->ssp.remote_node_port_width = idev->device_port_width; @@ -135,14 +136,14 @@ static void scic_sds_remote_node_context_construct_buffer( if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { rnc->ssp.connection_occupancy_timeout = - ihost->user_parameters.sds1.stp_max_occupancy_timeout; + ihost->user_parameters.stp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = - ihost->user_parameters.sds1.stp_inactivity_timeout; + ihost->user_parameters.stp_inactivity_timeout; } else { rnc->ssp.connection_occupancy_timeout = - ihost->user_parameters.sds1.ssp_max_occupancy_timeout; + ihost->user_parameters.ssp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = - ihost->user_parameters.sds1.ssp_inactivity_timeout; + ihost->user_parameters.ssp_inactivity_timeout; } rnc->ssp.initial_arbitration_wait_time = 0; @@ -164,8 +165,8 @@ static void scic_sds_remote_node_context_construct_buffer( * to its ready state. If the remote node context is already setup to * transition to its final state then this function does nothing. none */ -static void scic_sds_remote_node_context_setup_to_resume( - struct scic_sds_remote_node_context *sci_rnc, +static void sci_remote_node_context_setup_to_resume( + struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { @@ -176,8 +177,8 @@ static void scic_sds_remote_node_context_setup_to_resume( } } -static void scic_sds_remote_node_context_setup_to_destory( - struct scic_sds_remote_node_context *sci_rnc, +static void sci_remote_node_context_setup_to_destory( + struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { @@ -192,8 +193,8 @@ static void scic_sds_remote_node_context_setup_to_destory( * This method just calls the user callback function and then resets the * callback. 
 */
-static void scic_sds_remote_node_context_notify_user(
-	struct scic_sds_remote_node_context *rnc)
+static void sci_remote_node_context_notify_user(
+	struct sci_remote_node_context *rnc)
 {
 	if (rnc->user_callback != NULL) {
 		(*rnc->user_callback)(rnc->user_cookie);
@@ -203,99 +204,80 @@ static void scic_sds_remote_node_context_notify_user(
 	}
 }
 
-static void scic_sds_remote_node_context_continue_state_transitions(struct scic_sds_remote_node_context *rnc)
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
 {
 	if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
-		scic_sds_remote_node_context_resume(rnc, rnc->user_callback,
+		sci_remote_node_context_resume(rnc, rnc->user_callback,
 			rnc->user_cookie);
 }
 
-/**
- *
- * @sci_rnc: The remote node context object that is to be validated.
- *
- * This method will mark the rnc buffer as being valid and post the request to
- * the hardware. none
- */
-static void scic_sds_remote_node_context_validate_context_buffer(
-	struct scic_sds_remote_node_context *sci_rnc)
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
 {
+	union scu_remote_node_context *rnc_buffer;
 	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
 	struct domain_device *dev = idev->domain_dev;
-	union scu_remote_node_context *rnc_buffer;
+	struct isci_host *ihost = idev->owning_port->owning_controller;
 
-	rnc_buffer = scic_sds_controller_get_remote_node_context_buffer(
-		scic_sds_remote_device_get_controller(idev),
-		sci_rnc->remote_node_index
-		);
+	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
 
 	rnc_buffer->ssp.is_valid = true;
 
 	if (!idev->is_direct_attached &&
 	    (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
-		scic_sds_remote_device_post_request(idev,
-						    SCU_CONTEXT_COMMAND_POST_RNC_96);
+		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
 	} else {
-		scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
 
-		if (idev->is_direct_attached) {
-			scic_sds_port_setup_transports(idev->owning_port,
-						       sci_rnc->remote_node_index);
-		}
+		if (idev->is_direct_attached)
+			sci_port_setup_transports(idev->owning_port,
+						  sci_rnc->remote_node_index);
 	}
 }
 
-/**
- *
- * @sci_rnc: The remote node context object that is to be invalidated.
- *
- * This method will update the RNC buffer and post the invalidate request.
none - */ -static void scic_sds_remote_node_context_invalidate_context_buffer( - struct scic_sds_remote_node_context *sci_rnc) +static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; - rnc_buffer = scic_sds_controller_get_remote_node_context_buffer( - scic_sds_remote_device_get_controller(rnc_to_dev(sci_rnc)), - sci_rnc->remote_node_index); + rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = false; - scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc), - SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); + sci_remote_device_post_request(rnc_to_dev(sci_rnc), + SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); } -static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); /* Check to see if we have gotten back to the initial state because * someone requested to destroy the remote node context object. */ if (sm->previous_state_id == SCI_RNC_INVALIDATING) { rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; - scic_sds_remote_node_context_notify_user(rnc); + sci_remote_node_context_notify_user(rnc); } } -static void scic_sds_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); + struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); - scic_sds_remote_node_context_validate_context_buffer(sci_rnc); + sci_remote_node_context_validate_context_buffer(sci_rnc); } -static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); - scic_sds_remote_node_context_invalidate_context_buffer(rnc); + sci_remote_node_context_invalidate_context_buffer(rnc); } -static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev; struct domain_device *dev; @@ -310,73 +292,73 @@ static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_st */ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && idev->is_direct_attached) - scic_sds_port_setup_transports(idev->owning_port, + sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); - scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); } -static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) +static void 
sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; if (rnc->user_callback) - scic_sds_remote_node_context_notify_user(rnc); + sci_remote_node_context_notify_user(rnc); } -static void scic_sds_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); - scic_sds_remote_node_context_continue_state_transitions(rnc); + sci_remote_node_context_continue_state_transitions(rnc); } -static void scic_sds_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); - scic_sds_remote_node_context_continue_state_transitions(rnc); + sci_remote_node_context_continue_state_transitions(rnc); } -static const struct sci_base_state scic_sds_remote_node_context_state_table[] = { +static const struct sci_base_state sci_remote_node_context_state_table[] = { [SCI_RNC_INITIAL] = { - .enter_state = scic_sds_remote_node_context_initial_state_enter, + .enter_state = sci_remote_node_context_initial_state_enter, }, [SCI_RNC_POSTING] = { - .enter_state = scic_sds_remote_node_context_posting_state_enter, + .enter_state = sci_remote_node_context_posting_state_enter, }, [SCI_RNC_INVALIDATING] = { - .enter_state = scic_sds_remote_node_context_invalidating_state_enter, + .enter_state = sci_remote_node_context_invalidating_state_enter, }, [SCI_RNC_RESUMING] = { - .enter_state = scic_sds_remote_node_context_resuming_state_enter, + .enter_state = sci_remote_node_context_resuming_state_enter, }, [SCI_RNC_READY] = { - .enter_state = scic_sds_remote_node_context_ready_state_enter, + .enter_state = sci_remote_node_context_ready_state_enter, }, [SCI_RNC_TX_SUSPENDED] = { - .enter_state = scic_sds_remote_node_context_tx_suspended_state_enter, + .enter_state = sci_remote_node_context_tx_suspended_state_enter, }, [SCI_RNC_TX_RX_SUSPENDED] = { - .enter_state = scic_sds_remote_node_context_tx_rx_suspended_state_enter, + .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, }, [SCI_RNC_AWAIT_SUSPENSION] = { }, }; -void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, u16 remote_node_index) { - memset(rnc, 0, sizeof(struct scic_sds_remote_node_context)); + memset(rnc, 0, sizeof(struct sci_remote_node_context)); rnc->remote_node_index = remote_node_index; rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; - sci_init_sm(&rnc->sm, scic_sds_remote_node_context_state_table, SCI_RNC_INITIAL); + sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); } -enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code) { enum 
scis_sds_remote_node_context_states state; @@ -476,7 +458,7 @@ enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remot } -enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { @@ -485,7 +467,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: - scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_RESUMING: @@ -493,7 +475,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: - scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); return SCI_SUCCESS; case SCI_RNC_INITIAL: @@ -511,7 +493,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod } } -enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, u32 suspend_type, scics_sds_remote_node_context_callback cb_fn, void *cb_p) @@ -530,7 +512,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node sci_rnc->suspension_code = suspend_type; if (suspend_type == SCI_SOFTWARE_SUSPENSION) { - scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc), + sci_remote_device_post_request(rnc_to_dev(sci_rnc), SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); } @@ -538,7 +520,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node return SCI_SUCCESS; } -enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { @@ -550,8 +532,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_STATE; - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); - scic_sds_remote_node_context_construct_buffer(sci_rnc); + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_construct_buffer(sci_rnc); sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); return SCI_SUCCESS; case SCI_RNC_POSTING: @@ -567,7 +549,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) @@ -584,11 +566,11 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ return SCI_SUCCESS; } case SCI_RNC_TX_RX_SUSPENDED: - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + 
sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); return SCI_FAILURE_INVALID_STATE; case SCI_RNC_AWAIT_SUSPENSION: - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), @@ -597,7 +579,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ } } -enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; @@ -622,7 +604,7 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod return SCI_FAILURE_INVALID_STATE; } -enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; @@ -635,7 +617,7 @@ enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_n return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: - scic_sds_remote_node_context_resume(sci_rnc, NULL, NULL); + sci_remote_node_context_resume(sci_rnc, NULL, NULL); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index 7a24c7a12de1..b475c5c26642 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h @@ -80,7 +80,7 @@ struct isci_request; struct isci_remote_device; -struct scic_sds_remote_node_context; +struct sci_remote_node_context; typedef void (*scics_sds_remote_node_context_callback)(void *); @@ -147,19 +147,19 @@ enum scis_sds_remote_node_context_states { * This enumeration is used to define the end destination state for the remote * node context. */ -enum scic_sds_remote_node_context_destination_state { +enum sci_remote_node_context_destination_state { SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL }; /** - * struct scic_sds_remote_node_context - This structure contains the data + * struct sci_remote_node_context - This structure contains the data * associated with the remote node context object. The remote node context * (RNC) object models the the remote device information necessary to manage * the silicon RNC. */ -struct scic_sds_remote_node_context { +struct sci_remote_node_context { /** * This field indicates the remote node index (RNI) associated with * this RNC. @@ -177,7 +177,7 @@ struct scic_sds_remote_node_context { * state. This can cause an automatic resume on receiving a suspension * notification. 
*/ - enum scic_sds_remote_node_context_destination_state destination_state; + enum sci_remote_node_context_destination_state destination_state; /** * This field contains the callback function that the user requested to be @@ -197,31 +197,31 @@ struct scic_sds_remote_node_context { struct sci_base_state_machine sm; }; -void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, u16 remote_node_index); -bool scic_sds_remote_node_context_is_ready( - struct scic_sds_remote_node_context *sci_rnc); +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc); -#define scic_sds_remote_node_context_get_remote_node_index(rcn) \ +#define sci_remote_node_context_get_remote_node_index(rcn) \ ((rnc)->remote_node_index) -enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code); -enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter); -enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, u32 suspend_type, scics_sds_remote_node_context_callback cb_fn, void *cb_p); -enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p); -enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq); -enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq); #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c index 6b9465a5a217..301b3141945e 100644 --- a/drivers/scsi/isci/remote_node_table.c +++ b/drivers/scsi/isci/remote_node_table.c @@ -74,8 +74,8 @@ * just bit position. u32 This is the absolute bit position for an available * group. */ -static u32 scic_sds_remote_node_table_get_group_index( - struct scic_remote_node_table *remote_node_table, +static u32 sci_remote_node_table_get_group_index( + struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u32 dword_index; @@ -108,8 +108,8 @@ static u32 scic_sds_remote_node_table_get_group_index( * This method will clear the group index entry in the specified group index * table. none */ -static void scic_sds_remote_node_table_clear_group_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_clear_group_index( + struct sci_remote_node_table *remote_node_table, u32 group_table_index, u32 group_index) { @@ -138,8 +138,8 @@ static void scic_sds_remote_node_table_clear_group_index( * This method will set the group index bit entry in the specified gropu index * table. 
none */ -static void scic_sds_remote_node_table_set_group_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_set_group_index( + struct sci_remote_node_table *remote_node_table, u32 group_table_index, u32 group_index) { @@ -167,8 +167,8 @@ static void scic_sds_remote_node_table_set_group_index( * This method will set the remote to available in the remote node allocation * table. none */ -static void scic_sds_remote_node_table_set_node_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_set_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_index) { u32 dword_location; @@ -200,8 +200,8 @@ static void scic_sds_remote_node_table_set_node_index( * This method clears the remote node index from the table of available remote * nodes. none */ -static void scic_sds_remote_node_table_clear_node_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_clear_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_index) { u32 dword_location; @@ -231,8 +231,8 @@ static void scic_sds_remote_node_table_clear_node_index( * * This method clears the entire table slot at the specified slot index. none */ -static void scic_sds_remote_node_table_clear_group( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_clear_group( + struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; @@ -258,8 +258,8 @@ static void scic_sds_remote_node_table_clear_group( * * THis method sets an entire remote node group in the remote node table. */ -static void scic_sds_remote_node_table_set_group( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_set_group( + struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; @@ -288,8 +288,8 @@ static void scic_sds_remote_node_table_set_group( * This method will return the group value for the specified group index. The * bit values at the specified remote node group index. */ -static u8 scic_sds_remote_node_table_get_group_value( - struct scic_remote_node_table *remote_node_table, +static u8 sci_remote_node_table_get_group_value( + struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; @@ -313,8 +313,8 @@ static u8 scic_sds_remote_node_table_get_group_value( * * This method will initialize the remote node table for use. 
none */ -void scic_sds_remote_node_table_initialize( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, u32 remote_node_entries) { u32 index; @@ -342,7 +342,7 @@ void scic_sds_remote_node_table_initialize( /* Initialize each full DWORD to a FULL SET of remote nodes */ for (index = 0; index < remote_node_entries; index++) { - scic_sds_remote_node_table_set_node_index(remote_node_table, index); + sci_remote_node_table_set_node_index(remote_node_table, index); } remote_node_table->group_array_size = (u16) @@ -353,14 +353,14 @@ void scic_sds_remote_node_table_initialize( /* * These are all guaranteed to be full slot values so fill them in the * available sets of 3 remote nodes */ - scic_sds_remote_node_table_set_group_index(remote_node_table, 2, index); + sci_remote_node_table_set_group_index(remote_node_table, 2, index); } /* Now fill in any remainders that we may find */ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) { - scic_sds_remote_node_table_set_group_index(remote_node_table, 1, index); + sci_remote_node_table_set_group_index(remote_node_table, 1, index); } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) { - scic_sds_remote_node_table_set_group_index(remote_node_table, 0, index); + sci_remote_node_table_set_group_index(remote_node_table, 0, index); } } @@ -379,8 +379,8 @@ void scic_sds_remote_node_table_initialize( * updated. The RNi value or an invalid remote node context if an RNi can not * be found. */ -static u16 scic_sds_remote_node_table_allocate_single_remote_node( - struct scic_remote_node_table *remote_node_table, +static u16 sci_remote_node_table_allocate_single_remote_node( + struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u8 index; @@ -388,12 +388,12 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( u32 group_index; u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; - group_index = scic_sds_remote_node_table_get_group_index( + group_index = sci_remote_node_table_get_group_index( remote_node_table, group_table_index); /* We could not find an available slot in the table selector 0 */ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { - group_value = scic_sds_remote_node_table_get_group_value( + group_value = sci_remote_node_table_get_group_value( remote_node_table, group_index); for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) { @@ -402,16 +402,16 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT + index); - scic_sds_remote_node_table_clear_group_index( + sci_remote_node_table_clear_group_index( remote_node_table, group_table_index, group_index ); - scic_sds_remote_node_table_clear_node_index( + sci_remote_node_table_clear_node_index( remote_node_table, remote_node_index ); if (group_table_index > 0) { - scic_sds_remote_node_table_set_group_index( + sci_remote_node_table_set_group_index( remote_node_table, group_table_index - 1, group_index ); } @@ -436,24 +436,24 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( * The remote node index that represents three consecutive remote node entries * or an invalid remote node context if none can be found. 
*/ -static u16 scic_sds_remote_node_table_allocate_triple_remote_node( - struct scic_remote_node_table *remote_node_table, +static u16 sci_remote_node_table_allocate_triple_remote_node( + struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u32 group_index; u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; - group_index = scic_sds_remote_node_table_get_group_index( + group_index = sci_remote_node_table_get_group_index( remote_node_table, group_table_index); if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT; - scic_sds_remote_node_table_clear_group_index( + sci_remote_node_table_clear_group_index( remote_node_table, group_table_index, group_index ); - scic_sds_remote_node_table_clear_group( + sci_remote_node_table_clear_group( remote_node_table, group_index ); } @@ -473,31 +473,31 @@ static u16 scic_sds_remote_node_table_allocate_triple_remote_node( * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is * the remote node index that is returned or an invalid remote node context. */ -u16 scic_sds_remote_node_table_allocate_remote_node( - struct scic_remote_node_table *remote_node_table, +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count) { u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { remote_node_index = - scic_sds_remote_node_table_allocate_single_remote_node( + sci_remote_node_table_allocate_single_remote_node( remote_node_table, 0); if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { remote_node_index = - scic_sds_remote_node_table_allocate_single_remote_node( + sci_remote_node_table_allocate_single_remote_node( remote_node_table, 1); } if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { remote_node_index = - scic_sds_remote_node_table_allocate_single_remote_node( + sci_remote_node_table_allocate_single_remote_node( remote_node_table, 2); } } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { remote_node_index = - scic_sds_remote_node_table_allocate_triple_remote_node( + sci_remote_node_table_allocate_triple_remote_node( remote_node_table, 2); } @@ -511,8 +511,8 @@ u16 scic_sds_remote_node_table_allocate_remote_node( * This method will free a single remote node index back to the remote node * table. This routine will update the remote node groups */ -static void scic_sds_remote_node_table_release_single_remote_node( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_release_single_remote_node( + struct sci_remote_node_table *remote_node_table, u16 remote_node_index) { u32 group_index; @@ -520,7 +520,7 @@ static void scic_sds_remote_node_table_release_single_remote_node( group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; - group_value = scic_sds_remote_node_table_get_group_value(remote_node_table, group_index); + group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index); /* * Assert that we are not trying to add an entry to a slot that is already @@ -531,22 +531,22 @@ static void scic_sds_remote_node_table_release_single_remote_node( /* * There are no entries in this slot so it must be added to the single * slot table. 
 */
-		scic_sds_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+		sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
 	} else if ((group_value & (group_value - 1)) == 0) {
 		/*
 		 * There is only one entry in this slot so it must be moved from the
 		 * single slot table to the dual slot table */
-		scic_sds_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
-		scic_sds_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+		sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+		sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
 	} else {
 		/*
 		 * There are two entries in the slot so it must be moved from the dual
 		 * slot table to the tripple slot table. */
-		scic_sds_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
-		scic_sds_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+		sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+		sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
 	}
 
-	scic_sds_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+	sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
 }
 
 /**
@@ -557,19 +557,19 @@ static void scic_sds_remote_node_table_release_single_remote_node(
 * @remote_node_table:
 *
 * This method will release a group of three consecutive remote nodes back to
 * the free remote nodes.
 */
-static void scic_sds_remote_node_table_release_triple_remote_node(
-	struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_release_triple_remote_node(
+	struct sci_remote_node_table *remote_node_table,
 	u16 remote_node_index)
 {
 	u32 group_index;
 
 	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
 
-	scic_sds_remote_node_table_set_group_index(
+	sci_remote_node_table_set_group_index(
 		remote_node_table, 2, group_index
 		);
 
-	scic_sds_remote_node_table_set_group(remote_node_table, group_index);
+	sci_remote_node_table_set_group(remote_node_table, group_index);
 }
 
 /**
@@ -582,16 +582,16 @@ static void scic_sds_remote_node_table_release_triple_remote_node(
 *
 * This method will release the remote node index back into the remote node
 * table free pool.
*/ -void scic_sds_remote_node_table_release_remote_node_index( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count, u16 remote_node_index) { if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { - scic_sds_remote_node_table_release_single_remote_node( + sci_remote_node_table_release_single_remote_node( remote_node_table, remote_node_index); } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { - scic_sds_remote_node_table_release_triple_remote_node( + sci_remote_node_table_release_triple_remote_node( remote_node_table, remote_node_index); } } diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h index 5737d9a30cca..721ab982d2ac 100644 --- a/drivers/scsi/isci/remote_node_table.h +++ b/drivers/scsi/isci/remote_node_table.h @@ -130,11 +130,11 @@ #define SCU_SATA_REMOTE_NODE_COUNT 1 /** - * struct scic_remote_node_table - + * struct sci_remote_node_table - * * */ -struct scic_remote_node_table { +struct sci_remote_node_table { /** * This field contains the array size in dwords */ @@ -172,16 +172,16 @@ struct scic_remote_node_table { /* --------------------------------------------------------------------------- */ -void scic_sds_remote_node_table_initialize( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, u32 remote_node_entries); -u16 scic_sds_remote_node_table_allocate_remote_node( - struct scic_remote_node_table *remote_node_table, +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count); -void scic_sds_remote_node_table_release_remote_node_index( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count, u16 remote_node_index); diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 36e674896bc5..bcb3c08c19a7 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -89,7 +89,7 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, return ihost->task_context_dma + offset; } - return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); + return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); } static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) @@ -100,7 +100,7 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) e->address_modifier = 0; } -static void scic_sds_request_build_sgl(struct isci_request *ireq) +static void sci_request_build_sgl(struct isci_request *ireq) { struct isci_host *ihost = ireq->isci_host; struct sas_task *task = isci_request_access_task(ireq); @@ -158,7 +158,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq) } } -static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) +static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) { struct ssp_cmd_iu *cmd_iu; struct sas_task *task = isci_request_access_task(ireq); @@ -178,7 +178,7 @@ static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) sizeof(task->ssp_task.cdb) / sizeof(u32)); } -static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq) +static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) { struct ssp_task_iu 
*task_iu; struct sas_task *task = isci_request_access_task(ireq); @@ -211,8 +211,8 @@ static void scu_ssp_reqeust_construct_task_context( struct isci_remote_device *idev; struct isci_port *iport; - idev = scic_sds_request_get_device(ireq); - iport = scic_sds_request_get_port(ireq); + idev = sci_request_get_device(ireq); + iport = sci_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -220,13 +220,13 @@ static void scu_ssp_reqeust_construct_task_context( task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(controller); - task_context->logical_port_index = scic_sds_port_get_index(iport); + sci_controller_get_protocol_engine_group(controller); + task_context->logical_port_index = sci_port_get_index(iport); task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = scic_sds_remote_device_get_index(idev); + task_context->remote_node_index = sci_remote_device_get_index(idev); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -242,9 +242,9 @@ static void scu_ssp_reqeust_construct_task_context( task_context->task_phase = 0x01; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(controller) << + (sci_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(iport) << + (sci_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); @@ -252,7 +252,7 @@ static void scu_ssp_reqeust_construct_task_context( * Copy the physical address for the command buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); task_context->command_iu_upper = upper_32_bits(dma_addr); task_context->command_iu_lower = lower_32_bits(dma_addr); @@ -261,7 +261,7 @@ static void scu_ssp_reqeust_construct_task_context( * Copy the physical address for the response buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); task_context->response_iu_upper = upper_32_bits(dma_addr); task_context->response_iu_lower = lower_32_bits(dma_addr); @@ -298,7 +298,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, task_context->transfer_length_bytes = len; if (task_context->transfer_length_bytes > 0) - scic_sds_request_build_sgl(ireq); + sci_request_build_sgl(ireq); } /** @@ -349,8 +349,8 @@ static void scu_sata_reqeust_construct_task_context( struct isci_remote_device *idev; struct isci_port *iport; - idev = scic_sds_request_get_device(ireq); - iport = scic_sds_request_get_port(ireq); + idev = sci_request_get_device(ireq); + iport = sci_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -358,14 +358,14 @@ static void scu_sata_reqeust_construct_task_context( task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(controller); + sci_controller_get_protocol_engine_group(controller); task_context->logical_port_index = - 
scic_sds_port_get_index(iport); + sci_port_get_index(iport); task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = scic_sds_remote_device_get_index(idev); + task_context->remote_node_index = sci_remote_device_get_index(idev); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -385,9 +385,9 @@ static void scu_sata_reqeust_construct_task_context( task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(controller) << + (sci_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(iport) << + (sci_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); /* @@ -395,7 +395,7 @@ static void scu_sata_reqeust_construct_task_context( * Context. We must offset the command buffer by 4 bytes because the * first 4 bytes are transfered in the body of the TC. */ - dma_addr = scic_io_request_get_dma_addr(ireq, + dma_addr = sci_io_request_get_dma_addr(ireq, ((char *) &ireq->stp.cmd) + sizeof(u32)); @@ -420,7 +420,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); } -static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq, +static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, bool copy_rx_frame) { struct isci_stp_request *stp_req = &ireq->stp.req; @@ -432,7 +432,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; if (copy_rx_frame) { - scic_sds_request_build_sgl(ireq); + sci_request_build_sgl(ireq); stp_req->sgl.index = 0; } else { /* The user does not want the data copied to the SGL buffer location */ @@ -454,7 +454,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method * returns an indication as to whether the construction was successful. 
*/ -static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, +static void sci_stp_optimized_request_construct(struct isci_request *ireq, u8 optimized_task_type, u32 len, enum dma_data_direction dir) @@ -465,7 +465,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, scu_sata_reqeust_construct_task_context(ireq, task_context); /* Copy over the SGL elements */ - scic_sds_request_build_sgl(ireq); + sci_request_build_sgl(ireq); /* Copy over the number of bytes to be transfered */ task_context->transfer_length_bytes = len; @@ -490,7 +490,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, static enum sci_status -scic_io_request_construct_sata(struct isci_request *ireq, +sci_io_request_construct_sata(struct isci_request *ireq, u32 len, enum dma_data_direction dir, bool copy) @@ -533,7 +533,7 @@ scic_io_request_construct_sata(struct isci_request *ireq, /* NCQ */ if (task->ata_task.use_ncq) { - scic_sds_stp_optimized_request_construct(ireq, + sci_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_FPDMAQ_READ, len, dir); return SCI_SUCCESS; @@ -541,17 +541,17 @@ scic_io_request_construct_sata(struct isci_request *ireq, /* DMA */ if (task->ata_task.dma_xfer) { - scic_sds_stp_optimized_request_construct(ireq, + sci_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_DMA_IN, len, dir); return SCI_SUCCESS; } else /* PIO */ - return scic_sds_stp_pio_request_construct(ireq, copy); + return sci_stp_pio_request_construct(ireq, copy); return status; } -static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq) +static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) { struct sas_task *task = isci_request_access_task(ireq); @@ -561,28 +561,28 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request * task->data_dir, task->total_xfer_len); - scic_sds_io_request_build_ssp_command_iu(ireq); + sci_io_request_build_ssp_command_iu(ireq); sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } -enum sci_status scic_task_request_construct_ssp( +enum sci_status sci_task_request_construct_ssp( struct isci_request *ireq) { /* Construct the SSP Task SCU Task Context */ scu_ssp_task_request_construct_task_context(ireq); /* Fill in the SSP Task IU */ - scic_sds_task_request_build_ssp_task_iu(ireq); + sci_task_request_build_ssp_task_iu(ireq); sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } -static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq) +static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) { enum sci_status status; bool copy = false; @@ -592,7 +592,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request copy = (task->data_dir == DMA_NONE) ? 
false : true; - status = scic_io_request_construct_sata(ireq, + status = sci_io_request_construct_sata(ireq, task->total_xfer_len, task->data_dir, copy); @@ -603,7 +603,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request return status; } -enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) +enum sci_status sci_task_request_construct_sata(struct isci_request *ireq) { enum sci_status status = SCI_SUCCESS; @@ -648,7 +648,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq) * BAR1 is the scu_registers * 0x20002C = 0x200000 + 0x2c * = start of task context SRAM + offset of (type.ssp.data_offset) - * TCi is the io_tag of struct scic_sds_request + * TCi is the io_tag of struct sci_request */ ret_val = readl(scu_reg_base + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + @@ -658,7 +658,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq) return ret_val; } -enum sci_status scic_sds_request_start(struct isci_request *ireq) +enum sci_status sci_request_start(struct isci_request *ireq) { enum sci_base_request_states state; struct scu_task_context *tc = ireq->tc; @@ -708,7 +708,7 @@ enum sci_status scic_sds_request_start(struct isci_request *ireq) } enum sci_status -scic_sds_io_request_terminate(struct isci_request *ireq) +sci_io_request_terminate(struct isci_request *ireq) { enum sci_base_request_states state; @@ -716,7 +716,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) switch (state) { case SCI_REQ_CONSTRUCTED: - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); @@ -759,7 +759,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) return SCI_FAILURE_INVALID_STATE; } -enum sci_status scic_sds_request_complete(struct isci_request *ireq) +enum sci_status sci_request_complete(struct isci_request *ireq) { enum sci_base_request_states state; struct isci_host *ihost = ireq->owning_controller; @@ -770,7 +770,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) return SCI_FAILURE_INVALID_STATE; if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) - scic_sds_controller_release_frame(ihost, + sci_controller_release_frame(ihost, ireq->saved_rx_frame_index); /* XXX can we just stop the machine and remove the 'final' state? */ @@ -778,7 +778,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) return SCI_SUCCESS; } -enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, +enum sci_status sci_io_request_event_handler(struct isci_request *ireq, u32 event_code) { enum sci_base_request_states state; @@ -818,7 +818,7 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, * @sci_req: This parameter specifies the request object for which to copy * the response data. 
*/ -static void scic_sds_io_request_copy_response(struct isci_request *ireq) +static void sci_io_request_copy_response(struct isci_request *ireq) { void *resp_buf; u32 len; @@ -848,7 +848,7 @@ request_started_state_tc_event(struct isci_request *ireq, */ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -868,11 +868,11 @@ request_started_state_tc_event(struct isci_request *ireq, word_cnt); if (resp->status == 0) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS_IO_DONE_EARLY); } else { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } @@ -885,7 +885,7 @@ request_started_state_tc_event(struct isci_request *ireq, &ireq->ssp.rsp, word_cnt); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -900,11 +900,11 @@ request_started_state_tc_event(struct isci_request *ireq, datapres = resp_iu->datapres; if (datapres == 1 || datapres == 2) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -921,12 +921,12 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): if (ireq->protocol == SCIC_STP_PROTOCOL) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); } else { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -944,7 +944,7 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); @@ -967,7 +967,7 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default: - scic_sds_request_set_status( + sci_request_set_status( ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, @@ -991,7 +991,7 @@ request_aborting_state_tc_event(struct isci_request *ireq, switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, + sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1012,7 +1012,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq { switch 
(SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); @@ -1036,7 +1036,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1057,7 +1057,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, * unexpected. but if the TC has success status, we * complete the IO anyway. */ - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1074,7 +1074,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, * these SMP_XXX_XX_ERR status. For these type of error, * we ask ihost user to retry the request. */ - scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, + sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1084,7 +1084,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, /* All other completion status cause the IO to be complete. If a NAK * was received, then it is up to the user to retry the request */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1101,7 +1101,7 @@ smp_request_await_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1111,7 +1111,7 @@ smp_request_await_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1122,7 +1122,7 @@ smp_request_await_tc_event(struct isci_request *ireq, return SCI_SUCCESS; } -void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, +void sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag) { /** @@ -1171,7 +1171,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); @@ -1182,7 +1182,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1198,7 +1198,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, /* transmit DATA_FIS from (current sgl + offset) for input * parameter length. 
current sgl and offset is alreay stored in the IO request */ -static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( +static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( struct isci_request *ireq, u32 length) { @@ -1223,10 +1223,10 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( task_context->type.stp.fis_type = FIS_DATA; /* send the new TC out. */ - return scic_controller_continue_io(ireq); + return sci_controller_continue_io(ireq); } -static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) +static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) { struct isci_stp_request *stp_req = &ireq->stp.req; struct scu_sgl_element_pair *sgl_pair; @@ -1252,7 +1252,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is return SCI_SUCCESS; if (stp_req->pio_len >= len) { - status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len); + status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); if (status != SCI_SUCCESS) return status; stp_req->pio_len -= len; @@ -1261,7 +1261,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is sgl = pio_sgl_next(stp_req); offset = 0; } else if (stp_req->pio_len < len) { - scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); + sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); /* Sgl offset will be adjusted and saved for future */ offset += stp_req->pio_len; @@ -1284,7 +1284,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is * specified data region. enum sci_status */ static enum sci_status -scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, +sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, u8 *data_buf, u32 len) { struct isci_request *ireq; @@ -1328,7 +1328,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_r * * Copy the data buffer to the io request data region. enum sci_status */ -static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( +static enum sci_status sci_stp_request_pio_data_in_copy_data( struct isci_stp_request *stp_req, u8 *data_buffer) { @@ -1338,14 +1338,14 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( * If there is less than 1K remaining in the transfer request * copy just the data for the transfer */ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + status = sci_stp_request_pio_data_in_copy_data_buffer( stp_req, data_buffer, stp_req->pio_len); if (status == SCI_SUCCESS) stp_req->pio_len = 0; } else { /* We are transfering the whole frame so copy */ - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + status = sci_stp_request_pio_data_in_copy_data_buffer( stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); if (status == SCI_SUCCESS) @@ -1363,7 +1363,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); @@ -1375,7 +1375,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. 
*/ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1398,7 +1398,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* Transmit data */ if (stp_req->pio_len != 0) { - status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); + status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status == SCI_SUCCESS) { if (stp_req->pio_len == 0) all_frames_transferred = true; @@ -1426,7 +1426,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status( + sci_request_set_status( ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1438,16 +1438,16 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, return status; } -static void scic_sds_stp_request_udma_complete_request( +static void sci_stp_request_udma_complete_request( struct isci_request *ireq, u32 scu_status, enum sci_status sci_status) { - scic_sds_request_set_status(ireq, scu_status, sci_status); + sci_request_set_status(ireq, scu_status, sci_status); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } -static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, +static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, u32 frame_index) { struct isci_host *ihost = ireq->owning_controller; @@ -1455,28 +1455,28 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct is enum sci_status status; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if ((status == SCI_SUCCESS) && (frame_header->fis_type == FIS_REGD2H)) { - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); } - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } enum sci_status -scic_sds_io_request_frame_handler(struct isci_request *ireq, +sci_io_request_frame_handler(struct isci_request *ireq, u32 frame_index) { struct isci_host *ihost = ireq->owning_controller; @@ -1491,7 +1491,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct ssp_frame_hdr ssp_hdr; void *frame_header; - scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); @@ -1502,7 +1502,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct ssp_response_iu *resp_iu; ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&resp_iu); @@ -1512,11 +1512,11 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, if (resp_iu->datapres == 0x01 || resp_iu->datapres == 0x02) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); } else - 
scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); } else { @@ -1531,22 +1531,22 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * In any case we are done with this frame buffer return it to * the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } case SCI_REQ_TASK_WAIT_TC_RESP: - scic_sds_io_request_copy_response(ireq); + sci_io_request_copy_response(ireq); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); - scic_sds_controller_release_frame(ihost,frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; case SCI_REQ_SMP_WAIT_RESP: { struct smp_resp *rsp_hdr = &ireq->smp.rsp; void *frame_header; - scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); @@ -1557,7 +1557,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, if (rsp_hdr->frame_type == SMP_RESPONSE) { void *smp_resp; - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, &smp_resp); @@ -1567,7 +1567,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, smp_resp, word_cnt); - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); @@ -1584,31 +1584,31 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, frame_index, rsp_hdr->frame_type); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_SMP_FRM_TYPE_ERR, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } case SCI_REQ_STP_UDMA_WAIT_TC_COMP: - return scic_sds_stp_request_udma_general_frame_handler(ireq, + return sci_stp_request_udma_general_frame_handler(ireq, frame_index); case SCI_REQ_STP_UDMA_WAIT_D2H: /* Use the general frame handler to copy the resposne data */ - status = scic_sds_stp_request_udma_general_frame_handler(ireq, + status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); if (status != SCI_SUCCESS) return status; - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); @@ -1618,7 +1618,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); @@ -1636,16 +1636,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); /* The command has completed with error */ - scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, + sci_request_set_status(ireq, 
SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -1655,7 +1655,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, "violation occurred\n", __func__, stp_req, frame_index); - scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, + sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, SCI_FAILURE_PROTOCOL_VIOLATION); break; } @@ -1663,7 +1663,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1673,7 +1673,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); @@ -1688,7 +1688,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_PIO_SETUP: /* Get from the frame buffer the PIO Setup Data */ - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1704,7 +1704,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, /* status: 4th byte in the 3rd dword */ stp_req->status = (frame_buffer[2] >> 24) & 0xff; - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); @@ -1717,7 +1717,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); } else if (task->data_dir == DMA_TO_DEVICE) { /* Transmit data */ - status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); + status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status != SCI_SUCCESS) break; sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); @@ -1745,15 +1745,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, break; } - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.req, frame_header, frame_buffer); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); @@ -1766,7 +1766,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, } /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1775,7 +1775,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; struct sata_fis_data *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); @@ -1800,14 +1800,14 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, frame_index, frame_header->fis_type); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame is decoded return 
it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1815,15 +1815,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, ireq->saved_rx_frame_index = frame_index; stp_req->pio_len = 0; } else { - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, + status = sci_stp_request_pio_data_in_copy_data(stp_req, (u8 *)frame_buffer); /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); } /* Check for the end of the transfer, are there more @@ -1833,7 +1833,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, return status; if ((stp_req->status & ATA_BUSY) == 0) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); @@ -1848,7 +1848,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { @@ -1864,16 +1864,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); /* The command has completed with error */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -1886,7 +1886,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, stp_req, frame_index); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, SCI_FAILURE_PROTOCOL_VIOLATION); break; @@ -1895,7 +1895,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1904,7 +1904,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * TODO: Is it even possible to get an unsolicited frame in the * aborting state? 
*/ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; default: @@ -1915,7 +1915,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, frame_index, state); - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; } } @@ -1927,7 +1927,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -1938,10 +1938,10 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq * completion. */ if (ireq->stp.rsp.fis_type == FIS_REGD2H) { - scic_sds_remote_device_suspend(ireq->target_device, + sci_remote_device_suspend(ireq->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else { @@ -1965,12 +1965,12 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): - scic_sds_remote_device_suspend(ireq->target_device, + sci_remote_device_suspend(ireq->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); /* Fall through to the default case */ default: /* All other completion status cause the IO to be complete. */ - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); break; @@ -1985,7 +1985,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); @@ -1997,7 +1997,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -2014,7 +2014,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); @@ -2025,7 +2025,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, * a NAK was received, then it is up to the user to retry the * request. 
*/ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -2037,7 +2037,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, } enum sci_status -scic_sds_io_request_tc_completion(struct isci_request *ireq, +sci_io_request_tc_completion(struct isci_request *ireq, u32 completion_code) { enum sci_base_request_states state; @@ -2832,7 +2832,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, ); /* complete the io request to the core. */ - scic_controller_complete_io(ihost, request->target_device, request); + sci_controller_complete_io(ihost, request->target_device, request); isci_put_device(idev); /* set terminated handle so it cannot be completed or @@ -2842,7 +2842,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, set_bit(IREQ_TERMINATED, &request->flags); } -static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) +static void sci_request_started_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct domain_device *dev = ireq->target_device->domain_dev; @@ -2879,7 +2879,7 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine * } } -static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) +static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct isci_host *ihost = ireq->owning_controller; @@ -2892,7 +2892,7 @@ static void scic_sds_request_completed_state_enter(struct sci_base_state_machine isci_task_request_complete(ihost, ireq, ireq->sci_status); } -static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) +static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); @@ -2900,31 +2900,31 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine ireq->tc->abort = 1; } -static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) +static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(ireq->target_device, + sci_remote_device_set_working_request(ireq->target_device, ireq); } -static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) +static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(ireq->target_device, + sci_remote_device_set_working_request(ireq->target_device, ireq); } -static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) +static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(ireq->target_device, + sci_remote_device_set_working_request(ireq->target_device, ireq); } -static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct 
sci_base_state_machine *sm) +static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct scu_task_context *tc = ireq->tc; @@ -2938,22 +2938,22 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet /* Clear the TC control bit */ tc->control_frame = 0; - status = scic_controller_continue_io(ireq); + status = sci_controller_continue_io(ireq); WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); } -static const struct sci_base_state scic_sds_request_state_table[] = { +static const struct sci_base_state sci_request_state_table[] = { [SCI_REQ_INIT] = { }, [SCI_REQ_CONSTRUCTED] = { }, [SCI_REQ_STARTED] = { - .enter_state = scic_sds_request_started_state_enter, + .enter_state = sci_request_started_state_enter, }, [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { - .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, + .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, }, [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, [SCI_REQ_STP_PIO_WAIT_H2D] = { - .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, + .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, }, [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, [SCI_REQ_STP_PIO_DATA_IN] = { }, @@ -2961,10 +2961,10 @@ static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, + .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, }, [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, + .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, }, [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, [SCI_REQ_TASK_WAIT_TC_COMP] = { }, @@ -2972,20 +2972,20 @@ static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_REQ_SMP_WAIT_RESP] = { }, [SCI_REQ_SMP_WAIT_TC_COMP] = { }, [SCI_REQ_COMPLETED] = { - .enter_state = scic_sds_request_completed_state_enter, + .enter_state = sci_request_completed_state_enter, }, [SCI_REQ_ABORTING] = { - .enter_state = scic_sds_request_aborting_state_enter, + .enter_state = sci_request_aborting_state_enter, }, [SCI_REQ_FINAL] = { }, }; static void -scic_sds_general_request_construct(struct isci_host *ihost, +sci_general_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { - sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT); + sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); ireq->target_device = idev; ireq->protocol = SCIC_NO_PROTOCOL; @@ -2997,7 +2997,7 @@ scic_sds_general_request_construct(struct isci_host *ihost, } static enum sci_status -scic_io_request_construct(struct isci_host *ihost, +sci_io_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { @@ -3005,7 +3005,7 @@ scic_io_request_construct(struct isci_host *ihost, enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(ihost, idev, ireq); + sci_general_request_construct(ihost, idev, ireq); if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 
return SCI_FAILURE_INVALID_REMOTE_DEVICE; @@ -3024,7 +3024,7 @@ scic_io_request_construct(struct isci_host *ihost, return status; } -enum sci_status scic_task_request_construct(struct isci_host *ihost, +enum sci_status sci_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq) { @@ -3032,7 +3032,7 @@ enum sci_status scic_task_request_construct(struct isci_host *ihost, enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(ihost, idev, ireq); + sci_general_request_construct(ihost, idev, ireq); if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { @@ -3053,7 +3053,7 @@ static enum sci_status isci_request_ssp_request_construct( "%s: request = %p\n", __func__, request); - status = scic_io_request_construct_basic_ssp(request); + status = sci_io_request_construct_basic_ssp(request); return status; } @@ -3074,7 +3074,7 @@ static enum sci_status isci_request_stp_request_construct( */ register_fis = isci_sata_task_to_fis_copy(task); - status = scic_io_request_construct_basic_sata(request); + status = sci_io_request_construct_basic_sata(request); /* Set the ncq tag in the fis, from the queue * command in the task. @@ -3091,7 +3091,7 @@ static enum sci_status isci_request_stp_request_construct( } static enum sci_status -scic_io_request_construct_smp(struct device *dev, +sci_io_request_construct_smp(struct device *dev, struct isci_request *ireq, struct sas_task *task) { @@ -3141,8 +3141,8 @@ scic_io_request_construct_smp(struct device *dev, task_context = ireq->tc; - idev = scic_sds_request_get_device(ireq); - iport = scic_sds_request_get_port(ireq); + idev = sci_request_get_device(ireq); + iport = sci_request_get_port(ireq); /* * Fill in the TC with the its required data @@ -3152,8 +3152,8 @@ scic_io_request_construct_smp(struct device *dev, task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(ihost); - task_context->logical_port_index = scic_sds_port_get_index(iport); + sci_controller_get_protocol_engine_group(ihost); + task_context->logical_port_index = sci_port_get_index(iport); task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; task_context->abort = 0; task_context->valid = SCU_TASK_CONTEXT_VALID; @@ -3195,9 +3195,9 @@ scic_io_request_construct_smp(struct device *dev, task_context->task_phase = 0; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(ihost) << + (sci_controller_get_protocol_engine_group(ihost) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(iport) << + (sci_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); /* @@ -3229,7 +3229,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) struct device *dev = &ireq->isci_host->pdev->dev; enum sci_status status = SCI_FAILURE; - status = scic_io_request_construct_smp(dev, ireq, task); + status = sci_io_request_construct_smp(dev, ireq, task); if (status != SCI_SUCCESS) dev_warn(&ireq->isci_host->pdev->dev, "%s: failed with status = %d\n", @@ -3283,7 +3283,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost, return SCI_FAILURE_INSUFFICIENT_RESOURCES; } - status = scic_io_request_construct(ihost, idev, request); + status = sci_io_request_construct(ihost, 
idev, request); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -3388,7 +3388,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide * request was built that way (ie. * ireq->is_task_management_request is false). */ - status = scic_controller_start_task(ihost, + status = sci_controller_start_task(ihost, idev, ireq); } else { @@ -3396,7 +3396,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide } } else { /* send the request, let the core assign the IO TAG. */ - status = scic_controller_start_io(ihost, idev, + status = sci_controller_start_io(ihost, idev, ireq); } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 0cafcead7a01..08fcf98e70f4 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -301,75 +301,75 @@ enum sci_base_request_states { }; /** - * scic_sds_request_get_controller() - + * sci_request_get_controller() - * * This macro will return the controller for this io request object */ -#define scic_sds_request_get_controller(ireq) \ +#define sci_request_get_controller(ireq) \ ((ireq)->owning_controller) /** - * scic_sds_request_get_device() - + * sci_request_get_device() - * * This macro will return the device for this io request object */ -#define scic_sds_request_get_device(ireq) \ +#define sci_request_get_device(ireq) \ ((ireq)->target_device) /** - * scic_sds_request_get_port() - + * sci_request_get_port() - * * This macro will return the port for this io request object */ -#define scic_sds_request_get_port(ireq) \ - scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq)) +#define sci_request_get_port(ireq) \ + sci_remote_device_get_port(sci_request_get_device(ireq)) /** - * scic_sds_request_get_post_context() - + * sci_request_get_post_context() - * * This macro returns the constructed post context result for the io request. */ -#define scic_sds_request_get_post_context(ireq) \ +#define sci_request_get_post_context(ireq) \ ((ireq)->post_context) /** - * scic_sds_request_get_task_context() - + * sci_request_get_task_context() - * * This is a helper macro to return the os handle for this request object. */ -#define scic_sds_request_get_task_context(request) \ +#define sci_request_get_task_context(request) \ ((request)->task_context_buffer) /** - * scic_sds_request_set_status() - + * sci_request_set_status() - * * This macro will set the scu hardware status and sci request completion * status for an io request. 
*/ -#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \ +#define sci_request_set_status(request, scu_status_code, sci_status_code) \ { \ (request)->scu_status = (scu_status_code); \ (request)->sci_status = (sci_status_code); \ } -enum sci_status scic_sds_request_start(struct isci_request *ireq); -enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq); +enum sci_status sci_request_start(struct isci_request *ireq); +enum sci_status sci_io_request_terminate(struct isci_request *ireq); enum sci_status -scic_sds_io_request_event_handler(struct isci_request *ireq, +sci_io_request_event_handler(struct isci_request *ireq, u32 event_code); enum sci_status -scic_sds_io_request_frame_handler(struct isci_request *ireq, +sci_io_request_frame_handler(struct isci_request *ireq, u32 frame_index); enum sci_status -scic_sds_task_request_terminate(struct isci_request *ireq); +sci_task_request_terminate(struct isci_request *ireq); extern enum sci_status -scic_sds_request_complete(struct isci_request *ireq); +sci_request_complete(struct isci_request *ireq); extern enum sci_status -scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code); +sci_io_request_tc_completion(struct isci_request *ireq, u32 code); /* XXX open code in caller */ static inline dma_addr_t -scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) +sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) { char *requested_addr = (char *)virt_addr; @@ -500,17 +500,17 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev); enum sci_status -scic_task_request_construct(struct isci_host *ihost, +sci_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq); enum sci_status -scic_task_request_construct_ssp(struct isci_request *ireq); +sci_task_request_construct_ssp(struct isci_request *ireq); enum sci_status -scic_task_request_construct_sata(struct isci_request *ireq); +sci_task_request_construct_sata(struct isci_request *ireq); void -scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); -void scic_sds_smp_request_copy_response(struct isci_request *ireq); +sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); +void sci_smp_request_copy_response(struct isci_request *ireq); static inline int isci_task_is_ncq_recovery(struct sas_task *task) { diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c index 87d8cc1a6e39..47b96c21548f 100644 --- a/drivers/scsi/isci/sata.c +++ b/drivers/scsi/isci/sata.c @@ -116,7 +116,7 @@ void isci_sata_set_ncq_tag( struct isci_request *request = task->lldd_task; register_fis->sector_count = qc->tag << 3; - scic_stp_io_request_set_ncq_tag(request, qc->tag); + sci_stp_io_request_set_ncq_tag(request, qc->tag); } /** @@ -187,7 +187,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire /* core builds the protocol specific request * based on the h2d fis. 
*/ - status = scic_task_request_construct_sata(ireq); + status = sci_task_request_construct_sata(ireq); return status; } diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 3a1fc55a7557..d040aa2f3722 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -257,12 +257,12 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return NULL; /* let the core do it's construct. */ - status = scic_task_request_construct(ihost, idev, tag, + status = sci_task_request_construct(ihost, idev, tag, ireq); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_task_request_construct failed - " + "%s: sci_task_request_construct failed - " "status = 0x%x\n", __func__, status); @@ -272,7 +272,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, /* XXX convert to get this from task->tproto like other drivers */ if (dev->dev_type == SAS_END_DEV) { isci_tmf->proto = SAS_PROTOCOL_SSP; - status = scic_task_request_construct_ssp(ireq); + status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) return NULL; } @@ -332,7 +332,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, spin_lock_irqsave(&ihost->scic_lock, flags); /* start the TMF io. */ - status = scic_controller_start_task(ihost, idev, ireq); + status = sci_controller_start_task(ihost, idev, ireq); if (status != SCI_TASK_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -364,7 +364,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, if (tmf->cb_state_func != NULL) tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); - scic_controller_terminate_request(ihost, + sci_controller_terminate_request(ihost, idev, ireq); @@ -556,7 +556,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { was_terminated = true; needs_cleanup_handling = true; - status = scic_controller_terminate_request(ihost, + status = sci_controller_terminate_request(ihost, idev, isci_request); } @@ -569,7 +569,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, */ if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, - "%s: scic_controller_terminate_request" + "%s: sci_controller_terminate_request" " returned = 0x%x\n", __func__, status); @@ -1251,7 +1251,7 @@ isci_task_request_complete(struct isci_host *ihost, /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ tmf_complete = tmf->complete; - scic_controller_complete_io(ihost, ireq->target_device, ireq); + sci_controller_complete_io(ihost, ireq->target_device, ireq); /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. */ @@ -1514,12 +1514,12 @@ static int isci_reset_device(struct isci_host *ihost, dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_reset(idev); + status = sci_remote_device_reset(idev); if (status != SCI_SUCCESS) { spin_unlock_irqrestore(&ihost->scic_lock, flags); dev_warn(&ihost->pdev->dev, - "%s: scic_remote_device_reset(%p) returned %d!\n", + "%s: sci_remote_device_reset(%p) returned %d!\n", __func__, idev, status); return TMF_RESP_FUNC_FAILED; @@ -1540,7 +1540,7 @@ static int isci_reset_device(struct isci_host *ihost, /* Since all pending TCs have been cleaned, resume the RNC. 
*/ spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_reset_complete(idev); + status = sci_remote_device_reset_complete(idev); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* If this is a device on an expander, bring the phy back up. */ @@ -1560,7 +1560,7 @@ static int isci_reset_device(struct isci_host *ihost, if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_remote_device_reset_complete(%p) " + "%s: sci_remote_device_reset_complete(%p) " "returned %d!\n", __func__, idev, status); } diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index a0e6f89fc6a1..e9e1e2abacb9 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -57,10 +57,10 @@ #include "unsolicited_frame_control.h" #include "registers.h" -int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) +int sci_unsolicited_frame_control_construct(struct isci_host *ihost) { - struct scic_sds_unsolicited_frame_control *uf_control = &ihost->uf_control; - struct scic_sds_unsolicited_frame *uf; + struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; + struct sci_unsolicited_frame *uf; u32 buf_len, header_len, i; dma_addr_t dma; size_t size; @@ -139,23 +139,14 @@ int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) return 0; } -/** - * This method returns the frame header for the specified frame index. - * @uf_control: - * @frame_index: - * @frame_header: - * - * enum sci_status - */ -enum sci_status scic_sds_unsolicited_frame_control_get_header( - struct scic_sds_unsolicited_frame_control *uf_control, - u32 frame_index, - void **frame_header) +enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_header) { if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { - /* - * Skip the first word in the frame since this is a controll word used - * by the hardware. */ + /* Skip the first word in the frame since this is a controll word used + * by the hardware. + */ *frame_header = &uf_control->buffers.array[frame_index].header->data; return SCI_SUCCESS; @@ -164,18 +155,9 @@ enum sci_status scic_sds_unsolicited_frame_control_get_header( return SCI_FAILURE_INVALID_PARAMETER_VALUE; } -/** - * This method returns the frame buffer for the specified frame index. - * @uf_control: - * @frame_index: - * @frame_buffer: - * - * enum sci_status - */ -enum sci_status scic_sds_unsolicited_frame_control_get_buffer( - struct scic_sds_unsolicited_frame_control *uf_control, - u32 frame_index, - void **frame_buffer) +enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_buffer) { if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { *frame_buffer = uf_control->buffers.array[frame_index].buffer; @@ -186,19 +168,8 @@ enum sci_status scic_sds_unsolicited_frame_control_get_buffer( return SCI_FAILURE_INVALID_PARAMETER_VALUE; } -/** - * This method releases the frame once this is done the frame is available for - * re-use by the hardware. The data contained in the frame header and frame - * buffer is no longer valid. - * @uf_control: This parameter specifies the UF control object - * @frame_index: This parameter specifies the frame index to attempt to release. - * - * This method returns an indication to the caller as to whether the - * unsolicited frame get pointer should be updated. 
- */ -bool scic_sds_unsolicited_frame_control_release_frame( - struct scic_sds_unsolicited_frame_control *uf_control, - u32 frame_index) +bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index) { u32 frame_get; u32 frame_cycle; diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index c0285a3db562..b849a84af34f 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -92,12 +92,12 @@ enum unsolicited_frame_state { }; /** - * struct scic_sds_unsolicited_frame - + * struct sci_unsolicited_frame - * * This is the unsolicited frame data structure it acts as the container for * the current frame state, frame header and frame buffer. */ -struct scic_sds_unsolicited_frame { +struct sci_unsolicited_frame { /** * This field contains the current frame state */ @@ -116,11 +116,11 @@ struct scic_sds_unsolicited_frame { }; /** - * struct scic_sds_uf_header_array - + * struct sci_uf_header_array - * * This structure contains all of the unsolicited frame header information. */ -struct scic_sds_uf_header_array { +struct sci_uf_header_array { /** * This field is represents a virtual pointer to the start * address of the UF address table. The table contains @@ -137,19 +137,19 @@ struct scic_sds_uf_header_array { }; /** - * struct scic_sds_uf_buffer_array - + * struct sci_uf_buffer_array - * * This structure contains all of the unsolicited frame buffer (actual payload) * information. */ -struct scic_sds_uf_buffer_array { +struct sci_uf_buffer_array { /** * This field is the unsolicited frame data its used to manage * the data for the unsolicited frame requests. It also represents * the virtual address location that corresponds to the * physical_address field. */ - struct scic_sds_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; + struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; /** * This field specifies the physical address location for the UF @@ -159,13 +159,13 @@ struct scic_sds_uf_buffer_array { }; /** - * struct scic_sds_uf_address_table_array - + * struct sci_uf_address_table_array - * * This object maintains all of the unsolicited frame address table specific * data. The address table is a collection of 64-bit pointers that point to * 1KB buffers into which the silicon will DMA unsolicited frames. */ -struct scic_sds_uf_address_table_array { +struct sci_uf_address_table_array { /** * This field represents a virtual pointer that refers to the * starting address of the UF address table. @@ -182,11 +182,11 @@ struct scic_sds_uf_address_table_array { }; /** - * struct scic_sds_unsolicited_frame_control - + * struct sci_unsolicited_frame_control - * * This object contains all of the data necessary to handle unsolicited frames. */ -struct scic_sds_unsolicited_frame_control { +struct sci_unsolicited_frame_control { /** * This field is the software copy of the unsolicited frame queue * get pointer. The controller object writes this value to the @@ -198,38 +198,38 @@ struct scic_sds_unsolicited_frame_control { * This field contains all of the unsolicited frame header * specific fields. */ - struct scic_sds_uf_header_array headers; + struct sci_uf_header_array headers; /** * This field contains all of the unsolicited frame buffer * specific fields. */ - struct scic_sds_uf_buffer_array buffers; + struct sci_uf_buffer_array buffers; /** * This field contains all of the unsolicited frame address table * specific fields. 
*/ - struct scic_sds_uf_address_table_array address_table; + struct sci_uf_address_table_array address_table; }; struct isci_host; -int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost); +int sci_unsolicited_frame_control_construct(struct isci_host *ihost); -enum sci_status scic_sds_unsolicited_frame_control_get_header( - struct scic_sds_unsolicited_frame_control *uf_control, +enum sci_status sci_unsolicited_frame_control_get_header( + struct sci_unsolicited_frame_control *uf_control, u32 frame_index, void **frame_header); -enum sci_status scic_sds_unsolicited_frame_control_get_buffer( - struct scic_sds_unsolicited_frame_control *uf_control, +enum sci_status sci_unsolicited_frame_control_get_buffer( + struct sci_unsolicited_frame_control *uf_control, u32 frame_index, void **frame_buffer); -bool scic_sds_unsolicited_frame_control_release_frame( - struct scic_sds_unsolicited_frame_control *uf_control, +bool sci_unsolicited_frame_control_release_frame( + struct sci_unsolicited_frame_control *uf_control, u32 frame_index); #endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */ |
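Aside (not part of the patch above): the renamed unsolicited-frame helpers, sci_unsolicited_frame_control_get_header() and sci_unsolicited_frame_control_get_buffer(), share one pattern worth noting when reading the diff — the frame index is range-checked against SCU_MAX_UNSOLICITED_FRAMES before any array access, the pointer is returned through an out-parameter, and the function's return value is purely a status code. The following standalone sketch shows that same shape in isolation; every name here (demo_*, DEMO_MAX_FRAMES) is invented for illustration and does not exist in the driver.

#include <stdio.h>

#define DEMO_MAX_FRAMES 8	/* stand-in for SCU_MAX_UNSOLICITED_FRAMES */

enum demo_status {
	DEMO_SUCCESS,
	DEMO_FAILURE_INVALID_PARAMETER_VALUE,
};

struct demo_frame {
	unsigned char buffer[1024];	/* stand-in for the 1KB UF payload */
};

struct demo_frame_control {
	struct demo_frame array[DEMO_MAX_FRAMES];
};

/* Return the payload for @frame_index through @frame_buffer, or an error
 * status if the index falls outside the frame pool. Mirrors the
 * bounds-check-then-out-parameter style of the sci_* helpers above.
 */
static enum demo_status demo_get_buffer(struct demo_frame_control *fc,
					unsigned int frame_index,
					void **frame_buffer)
{
	if (frame_index < DEMO_MAX_FRAMES) {
		*frame_buffer = fc->array[frame_index].buffer;
		return DEMO_SUCCESS;
	}

	return DEMO_FAILURE_INVALID_PARAMETER_VALUE;
}

int main(void)
{
	static struct demo_frame_control fc;
	void *buf;

	/* Valid index: success status, buf points into the pool. */
	if (demo_get_buffer(&fc, 3, &buf) == DEMO_SUCCESS)
		printf("frame 3 buffer at %p\n", buf);

	/* Out-of-range index: rejected before any array access. */
	if (demo_get_buffer(&fc, DEMO_MAX_FRAMES, &buf) != DEMO_SUCCESS)
		printf("index %u rejected\n", DEMO_MAX_FRAMES);

	return 0;
}

The status-code return plus out-parameter keeps the caller's error handling uniform across the driver, which is why the rename in this patch touches so many call sites but changes no behavior.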